2024-11-22 18:49:35,283 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca
2024-11-22 18:49:35,294 main DEBUG Took 0.009345 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-11-22 18:49:35,294 main DEBUG PluginManager 'Core' found 129 plugins
2024-11-22 18:49:35,295 main DEBUG PluginManager 'Level' found 0 plugins
2024-11-22 18:49:35,295 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-11-22 18:49:35,297 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-22 18:49:35,303 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-11-22 18:49:35,314 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-22 18:49:35,316 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-22 18:49:35,316 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-22 18:49:35,317 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-22 18:49:35,317 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-22 18:49:35,317 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-22 18:49:35,318 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-22 18:49:35,318 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-22 18:49:35,319 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-22 18:49:35,319 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-22 18:49:35,320 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-22 18:49:35,320 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-22 18:49:35,320 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-22 18:49:35,321 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-22 18:49:35,321 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-22 18:49:35,321 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-22 18:49:35,322 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-22 18:49:35,322 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-22 18:49:35,322 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-22 18:49:35,323 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-22 18:49:35,323 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-22 18:49:35,323 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-22 18:49:35,324 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-22 18:49:35,324 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-22 18:49:35,324 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-22 18:49:35,325 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-11-22 18:49:35,326 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-22 18:49:35,327 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-11-22 18:49:35,329 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-11-22 18:49:35,329 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-11-22 18:49:35,330 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-11-22 18:49:35,330 main DEBUG PluginManager 'Converter' found 47 plugins
2024-11-22 18:49:35,338 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-11-22 18:49:35,341 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-11-22 18:49:35,342 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-11-22 18:49:35,342 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-11-22 18:49:35,343 main DEBUG createAppenders(={Console})
2024-11-22 18:49:35,344 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized
2024-11-22 18:49:35,344 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca
2024-11-22 18:49:35,344 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK.
2024-11-22 18:49:35,345 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-11-22 18:49:35,345 main DEBUG OutputStream closed
2024-11-22 18:49:35,345 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-11-22 18:49:35,345 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-11-22 18:49:35,346 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK
2024-11-22 18:49:35,409 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-11-22 18:49:35,411 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-11-22 18:49:35,412 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-11-22 18:49:35,414 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-11-22 18:49:35,414 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-11-22 18:49:35,414 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-11-22 18:49:35,415 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-11-22 18:49:35,415 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-11-22 18:49:35,415 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-11-22 18:49:35,415 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-11-22 18:49:35,416 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-11-22 18:49:35,416 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-11-22 18:49:35,416 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-11-22 18:49:35,417 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-11-22 18:49:35,417 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-11-22 18:49:35,417 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-11-22 18:49:35,417 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-11-22 18:49:35,418 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-11-22 18:49:35,420 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-22 18:49:35,420 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null
2024-11-22 18:49:35,421 main DEBUG Shutdown hook enabled. Registering a new one.
2024-11-22 18:49:35,421 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK.
2024-11-22T18:49:35,661 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/adbe9932-4861-9ece-9fe0-16f5318f8d37
2024-11-22 18:49:35,664 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-11-22 18:49:35,664 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-22T18:49:35,674 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins
2024-11-22T18:49:35,710 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=163, ProcessCount=11, AvailableMemoryMB=8537
2024-11-22T18:49:35,714 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-22T18:49:35,734 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/adbe9932-4861-9ece-9fe0-16f5318f8d37/cluster_6560dfe3-3cd3-7a9f-9ca8-b6dad9a14438, deleteOnExit=true
2024-11-22T18:49:35,735 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-11-22T18:49:35,736 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/adbe9932-4861-9ece-9fe0-16f5318f8d37/test.cache.data in system properties and HBase conf
2024-11-22T18:49:35,737 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/adbe9932-4861-9ece-9fe0-16f5318f8d37/hadoop.tmp.dir in system properties and HBase conf
2024-11-22T18:49:35,738 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/adbe9932-4861-9ece-9fe0-16f5318f8d37/hadoop.log.dir in system properties and HBase conf
2024-11-22T18:49:35,738 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/adbe9932-4861-9ece-9fe0-16f5318f8d37/mapreduce.cluster.local.dir in system properties and HBase conf
2024-11-22T18:49:35,739 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/adbe9932-4861-9ece-9fe0-16f5318f8d37/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-11-22T18:49:35,740 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-11-22T18:49:35,824 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2024-11-22T18:49:35,908 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-11-22T18:49:35,912 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/adbe9932-4861-9ece-9fe0-16f5318f8d37/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-11-22T18:49:35,913 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/adbe9932-4861-9ece-9fe0-16f5318f8d37/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-11-22T18:49:35,913 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/adbe9932-4861-9ece-9fe0-16f5318f8d37/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-11-22T18:49:35,913 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/adbe9932-4861-9ece-9fe0-16f5318f8d37/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-22T18:49:35,914 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/adbe9932-4861-9ece-9fe0-16f5318f8d37/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-11-22T18:49:35,914 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/adbe9932-4861-9ece-9fe0-16f5318f8d37/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-11-22T18:49:35,915 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/adbe9932-4861-9ece-9fe0-16f5318f8d37/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-22T18:49:35,915 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/adbe9932-4861-9ece-9fe0-16f5318f8d37/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-22T18:49:35,916 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/adbe9932-4861-9ece-9fe0-16f5318f8d37/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-11-22T18:49:35,916 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/adbe9932-4861-9ece-9fe0-16f5318f8d37/nfs.dump.dir in system properties and HBase conf
2024-11-22T18:49:35,916 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/adbe9932-4861-9ece-9fe0-16f5318f8d37/java.io.tmpdir in system properties and HBase conf
2024-11-22T18:49:35,917 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/adbe9932-4861-9ece-9fe0-16f5318f8d37/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-22T18:49:35,917 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/adbe9932-4861-9ece-9fe0-16f5318f8d37/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-11-22T18:49:35,918 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/adbe9932-4861-9ece-9fe0-16f5318f8d37/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-11-22T18:49:36,401 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-11-22T18:49:36,758 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
2024-11-22T18:49:36,834 INFO [Time-limited test {}] log.Log(170): Logging initialized @2222ms to org.eclipse.jetty.util.log.Slf4jLog
2024-11-22T18:49:36,930 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-22T18:49:37,005 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-22T18:49:37,026 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-22T18:49:37,026 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-22T18:49:37,027 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-22T18:49:37,043 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-22T18:49:37,046 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/adbe9932-4861-9ece-9fe0-16f5318f8d37/hadoop.log.dir/,AVAILABLE}
2024-11-22T18:49:37,047 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-22T18:49:37,243 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/adbe9932-4861-9ece-9fe0-16f5318f8d37/java.io.tmpdir/jetty-localhost-40283-hadoop-hdfs-3_4_1-tests_jar-_-any-14399638075300078331/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-22T18:49:37,250 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:40283}
2024-11-22T18:49:37,250 INFO [Time-limited test {}] server.Server(415): Started @2639ms
2024-11-22T18:49:37,281 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-11-22T18:49:37,643 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-22T18:49:37,649 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-22T18:49:37,651 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-22T18:49:37,651 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-22T18:49:37,651 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-22T18:49:37,652 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/adbe9932-4861-9ece-9fe0-16f5318f8d37/hadoop.log.dir/,AVAILABLE}
2024-11-22T18:49:37,653 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-22T18:49:37,774 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b07d1ba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/adbe9932-4861-9ece-9fe0-16f5318f8d37/java.io.tmpdir/jetty-localhost-42495-hadoop-hdfs-3_4_1-tests_jar-_-any-18249313104884377945/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-22T18:49:37,775 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:42495}
2024-11-22T18:49:37,775 INFO [Time-limited test {}] server.Server(415): Started @3164ms
2024-11-22T18:49:37,830 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-22T18:49:37,949 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-22T18:49:37,956 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-22T18:49:37,958 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-22T18:49:37,958 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-22T18:49:37,958 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-22T18:49:37,961 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/adbe9932-4861-9ece-9fe0-16f5318f8d37/hadoop.log.dir/,AVAILABLE}
2024-11-22T18:49:37,962 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-22T18:49:38,107 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bf97579{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/adbe9932-4861-9ece-9fe0-16f5318f8d37/java.io.tmpdir/jetty-localhost-39333-hadoop-hdfs-3_4_1-tests_jar-_-any-14027016318680071515/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-22T18:49:38,108 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:39333}
2024-11-22T18:49:38,108 INFO [Time-limited test {}] server.Server(415): Started @3497ms
2024-11-22T18:49:38,111 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-22T18:49:38,278 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/adbe9932-4861-9ece-9fe0-16f5318f8d37/cluster_6560dfe3-3cd3-7a9f-9ca8-b6dad9a14438/data/data3/current/BP-2061863200-172.17.0.2-1732301376499/current, will proceed with Du for space computation calculation,
2024-11-22T18:49:38,278 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/adbe9932-4861-9ece-9fe0-16f5318f8d37/cluster_6560dfe3-3cd3-7a9f-9ca8-b6dad9a14438/data/data4/current/BP-2061863200-172.17.0.2-1732301376499/current, will proceed with Du for space computation calculation,
2024-11-22T18:49:38,278 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/adbe9932-4861-9ece-9fe0-16f5318f8d37/cluster_6560dfe3-3cd3-7a9f-9ca8-b6dad9a14438/data/data2/current/BP-2061863200-172.17.0.2-1732301376499/current, will proceed with Du for space computation calculation,
2024-11-22T18:49:38,278 WARN [Thread-95 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/adbe9932-4861-9ece-9fe0-16f5318f8d37/cluster_6560dfe3-3cd3-7a9f-9ca8-b6dad9a14438/data/data1/current/BP-2061863200-172.17.0.2-1732301376499/current, will proceed with Du for space computation calculation,
2024-11-22T18:49:38,327 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-22T18:49:38,327 WARN [Thread-82 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-22T18:49:38,399 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7f296a1bd8f39743 with lease ID 0xb852cd6f6c48a3d: Processing first storage report for DS-dcd31298-4d5f-4dc2-b56e-037a47ef4cdd from datanode DatanodeRegistration(127.0.0.1:46333, datanodeUuid=4bddbb0b-1525-4d24-96c5-a71db291950f, infoPort=35081, infoSecurePort=0, ipcPort=42999, storageInfo=lv=-57;cid=testClusterID;nsid=177438035;c=1732301376499)
2024-11-22T18:49:38,400 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7f296a1bd8f39743 with lease ID 0xb852cd6f6c48a3d: from storage DS-dcd31298-4d5f-4dc2-b56e-037a47ef4cdd node DatanodeRegistration(127.0.0.1:46333, datanodeUuid=4bddbb0b-1525-4d24-96c5-a71db291950f, infoPort=35081, infoSecurePort=0, ipcPort=42999, storageInfo=lv=-57;cid=testClusterID;nsid=177438035;c=1732301376499), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-11-22T18:49:38,401 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcb1cebc0407b43f0 with lease ID 0xb852cd6f6c48a3e: Processing first storage report for DS-9469d354-99cd-4bc5-8079-98b94f6baea7 from datanode DatanodeRegistration(127.0.0.1:46673, datanodeUuid=85117859-41c5-4d03-b7cc-6f4b75054e37, infoPort=46595, infoSecurePort=0, ipcPort=38557, storageInfo=lv=-57;cid=testClusterID;nsid=177438035;c=1732301376499)
2024-11-22T18:49:38,401 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcb1cebc0407b43f0 with lease ID 0xb852cd6f6c48a3e: from storage DS-9469d354-99cd-4bc5-8079-98b94f6baea7 node DatanodeRegistration(127.0.0.1:46673, datanodeUuid=85117859-41c5-4d03-b7cc-6f4b75054e37, infoPort=46595, infoSecurePort=0, ipcPort=38557, storageInfo=lv=-57;cid=testClusterID;nsid=177438035;c=1732301376499), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-22T18:49:38,401 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7f296a1bd8f39743 with lease ID 0xb852cd6f6c48a3d: Processing first storage report for DS-5c285830-cf1b-4a4f-ab42-8066e421fe62 from datanode DatanodeRegistration(127.0.0.1:46333, datanodeUuid=4bddbb0b-1525-4d24-96c5-a71db291950f, infoPort=35081, infoSecurePort=0, ipcPort=42999, storageInfo=lv=-57;cid=testClusterID;nsid=177438035;c=1732301376499)
2024-11-22T18:49:38,401 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7f296a1bd8f39743 with lease ID 0xb852cd6f6c48a3d: from storage DS-5c285830-cf1b-4a4f-ab42-8066e421fe62 node DatanodeRegistration(127.0.0.1:46333, datanodeUuid=4bddbb0b-1525-4d24-96c5-a71db291950f, infoPort=35081, infoSecurePort=0, ipcPort=42999, storageInfo=lv=-57;cid=testClusterID;nsid=177438035;c=1732301376499), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-22T18:49:38,401 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcb1cebc0407b43f0 with lease ID 0xb852cd6f6c48a3e: Processing first storage report for DS-d27d05f5-3551-4666-b56d-d89ce82be1bb from datanode DatanodeRegistration(127.0.0.1:46673, datanodeUuid=85117859-41c5-4d03-b7cc-6f4b75054e37, infoPort=46595, infoSecurePort=0, ipcPort=38557, storageInfo=lv=-57;cid=testClusterID;nsid=177438035;c=1732301376499)
2024-11-22T18:49:38,402 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcb1cebc0407b43f0 with lease ID 0xb852cd6f6c48a3e: from storage DS-d27d05f5-3551-4666-b56d-d89ce82be1bb node DatanodeRegistration(127.0.0.1:46673, datanodeUuid=85117859-41c5-4d03-b7cc-6f4b75054e37, infoPort=46595, infoSecurePort=0, ipcPort=38557, storageInfo=lv=-57;cid=testClusterID;nsid=177438035;c=1732301376499), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0
2024-11-22T18:49:38,492 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/adbe9932-4861-9ece-9fe0-16f5318f8d37
2024-11-22T18:49:38,570 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/adbe9932-4861-9ece-9fe0-16f5318f8d37/cluster_6560dfe3-3cd3-7a9f-9ca8-b6dad9a14438/zookeeper_0, clientPort=49841, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/adbe9932-4861-9ece-9fe0-16f5318f8d37/cluster_6560dfe3-3cd3-7a9f-9ca8-b6dad9a14438/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/adbe9932-4861-9ece-9fe0-16f5318f8d37/cluster_6560dfe3-3cd3-7a9f-9ca8-b6dad9a14438/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-11-22T18:49:38,580 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49841
2024-11-22T18:49:38,590 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-22T18:49:38,594 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-22T18:49:38,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46673 is added to blk_1073741825_1001 (size=7)
2024-11-22T18:49:38,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46333 is added to blk_1073741825_1001 (size=7)
2024-11-22T18:49:39,283 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c with version=8
2024-11-22T18:49:39,284 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/hbase-staging
2024-11-22T18:49:39,378 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-11-22T18:49:39,666 INFO [Time-limited test {}] client.ConnectionUtils(128): master/d79ba0c344fb:0 server-side Connection retries=45
2024-11-22T18:49:39,677 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-22T18:49:39,678 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-22T18:49:39,682 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-22T18:49:39,682 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-22T18:49:39,683 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-22T18:49:39,817 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-11-22T18:49:39,877 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl
2024-11-22T18:49:39,888 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout
2024-11-22T18:49:39,893 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-22T18:49:39,920 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 2240 (auto-detected)
2024-11-22T18:49:39,921 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected)
2024-11-22T18:49:39,939 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44645
2024-11-22T18:49:39,961 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44645 connecting to ZooKeeper ensemble=127.0.0.1:49841
2024-11-22T18:49:39,998 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:446450x0, quorum=127.0.0.1:49841, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-22T18:49:40,001 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44645-0x10141033e850000 connected
2024-11-22T18:49:40,030 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-22T18:49:40,033 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-22T18:49:40,043 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44645-0x10141033e850000, quorum=127.0.0.1:49841, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-22T18:49:40,047 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c, hbase.cluster.distributed=false
2024-11-22T18:49:40,070 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44645-0x10141033e850000, quorum=127.0.0.1:49841, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-22T18:49:40,074 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44645
2024-11-22T18:49:40,074 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44645
2024-11-22T18:49:40,075 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44645
2024-11-22T18:49:40,075 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44645
2024-11-22T18:49:40,076 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44645
2024-11-22T18:49:40,195 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/d79ba0c344fb:0 server-side Connection retries=45
2024-11-22T18:49:40,197 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-22T18:49:40,198 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-22T18:49:40,198 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-22T18:49:40,198 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-22T18:49:40,198 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-22T18:49:40,201 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-22T18:49:40,203 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-22T18:49:40,204 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34657
2024-11-22T18:49:40,207 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34657 connecting to ZooKeeper ensemble=127.0.0.1:49841
2024-11-22T18:49:40,208 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-22T18:49:40,214 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-22T18:49:40,225 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:346570x0, quorum=127.0.0.1:49841, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-22T18:49:40,226 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34657-0x10141033e850001 connected
2024-11-22T18:49:40,226 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:346570x0, quorum=127.0.0.1:49841, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-22T18:49:40,231 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-22T18:49:40,239 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-11-22T18:49:40,242 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34657-0x10141033e850001, quorum=127.0.0.1:49841, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-22T18:49:40,249 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34657-0x10141033e850001, quorum=127.0.0.1:49841, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-22T18:49:40,250 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34657
2024-11-22T18:49:40,251 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34657
2024-11-22T18:49:40,253 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34657
2024-11-22T18:49:40,255 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34657
2024-11-22T18:49:40,256 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34657
2024-11-22T18:49:40,273 DEBUG [M:0;d79ba0c344fb:44645 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;d79ba0c344fb:44645
2024-11-22T18:49:40,274 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/d79ba0c344fb,44645,1732301379428
2024-11-22T18:49:40,281 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34657-0x10141033e850001, quorum=127.0.0.1:49841, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-22T18:49:40,281 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44645-0x10141033e850000, quorum=127.0.0.1:49841, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-22T18:49:40,283 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44645-0x10141033e850000, quorum=127.0.0.1:49841, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/d79ba0c344fb,44645,1732301379428
2024-11-22T18:49:40,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34657-0x10141033e850001, quorum=127.0.0.1:49841, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-11-22T18:49:40,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44645-0x10141033e850000, quorum=127.0.0.1:49841, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-22T18:49:40,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34657-0x10141033e850001, quorum=127.0.0.1:49841, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-22T18:49:40,305 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44645-0x10141033e850000, quorum=127.0.0.1:49841, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-11-22T18:49:40,306 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/d79ba0c344fb,44645,1732301379428 from backup master directory
2024-11-22T18:49:40,309 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34657-0x10141033e850001, quorum=127.0.0.1:49841, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-22T18:49:40,309 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44645-0x10141033e850000, quorum=127.0.0.1:49841, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/d79ba0c344fb,44645,1732301379428
2024-11-22T18:49:40,309 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44645-0x10141033e850000, quorum=127.0.0.1:49841, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-22T18:49:40,310 WARN [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-11-22T18:49:40,310 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=d79ba0c344fb,44645,1732301379428
2024-11-22T18:49:40,312 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0
2024-11-22T18:49:40,313 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0
2024-11-22T18:49:40,371 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/hbase.id] with ID: 95d609ae-2aa4-4ef7-9c70-48187225b38e
2024-11-22T18:49:40,371 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/.tmp/hbase.id
2024-11-22T18:49:40,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46673 is added to blk_1073741826_1002 (size=42)
2024-11-22T18:49:40,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46333 is added to blk_1073741826_1002 (size=42)
2024-11-22T18:49:40,385 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/.tmp/hbase.id]:[hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/hbase.id]
2024-11-22T18:49:40,428 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-22T18:49:40,433 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem.
2024-11-22T18:49:40,454 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 18ms.
2024-11-22T18:49:40,459 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44645-0x10141033e850000, quorum=127.0.0.1:49841, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-22T18:49:40,459 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34657-0x10141033e850001, quorum=127.0.0.1:49841, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-22T18:49:40,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46673 is added to blk_1073741827_1003 (size=196)
2024-11-22T18:49:40,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46333 is added to blk_1073741827_1003 (size=196)
2024-11-22T18:49:40,492 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-11-22T18:49:40,493 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-11-22T18:49:40,499 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-22T18:49:40,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46333 is added to blk_1073741828_1004 (size=1189)
2024-11-22T18:49:40,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46673 is added to blk_1073741828_1004 (size=1189)
2024-11-22T18:49:40,551 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/MasterData/data/master/store
2024-11-22T18:49:40,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46333 is added to blk_1073741829_1005 (size=34)
2024-11-22T18:49:40,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46673 is added to blk_1073741829_1005 (size=34)
2024-11-22T18:49:40,576 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure.
2024-11-22T18:49:40,580 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-22T18:49:40,581 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-22T18:49:40,581 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-22T18:49:40,582 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-22T18:49:40,584 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-22T18:49:40,584 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-22T18:49:40,584 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-22T18:49:40,585 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732301380581Disabling compacts and flushes for region at 1732301380581Disabling writes for close at 1732301380584 (+3 ms)Writing region close event to WAL at 1732301380584Closed at 1732301380584 2024-11-22T18:49:40,588 WARN [master/d79ba0c344fb:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/MasterData/data/master/store/.initializing 2024-11-22T18:49:40,588 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/MasterData/WALs/d79ba0c344fb,44645,1732301379428 2024-11-22T18:49:40,610 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d79ba0c344fb%2C44645%2C1732301379428, suffix=, logDir=hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/MasterData/WALs/d79ba0c344fb,44645,1732301379428, archiveDir=hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/MasterData/oldWALs, maxLogs=10 2024-11-22T18:49:40,619 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor d79ba0c344fb%2C44645%2C1732301379428.1732301380614 2024-11-22T18:49:40,638 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/MasterData/WALs/d79ba0c344fb,44645,1732301379428/d79ba0c344fb%2C44645%2C1732301379428.1732301380614 2024-11-22T18:49:40,649 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35081:35081),(127.0.0.1/127.0.0.1:46595:46595)] 2024-11-22T18:49:40,650 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-22T18:49:40,651 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T18:49:40,654 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:49:40,656 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:49:40,694 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:49:40,721 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-22T18:49:40,725 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:49:40,728 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:49:40,729 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:49:40,732 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-22T18:49:40,732 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:49:40,733 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T18:49:40,733 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:49:40,736 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-22T18:49:40,736 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:49:40,737 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T18:49:40,737 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:49:40,740 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-22T18:49:40,740 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:49:40,741 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T18:49:40,741 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:49:40,745 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:49:40,746 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:49:40,751 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:49:40,751 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:49:40,754 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-22T18:49:40,758 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:49:40,762 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T18:49:40,764 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=865949, jitterRate=0.10111139714717865}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-22T18:49:40,770 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732301380669Initializing all the Stores at 1732301380671 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732301380671Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732301380672 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732301380672Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732301380673 (+1 ms)Cleaning up temporary data from old regions at 1732301380751 (+78 ms)Region opened successfully at 1732301380770 (+19 ms) 2024-11-22T18:49:40,772 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-22T18:49:40,806 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@271733ed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d79ba0c344fb/172.17.0.2:0 2024-11-22T18:49:40,839 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-22T18:49:40,852 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-22T18:49:40,852 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-22T18:49:40,856 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-22T18:49:40,857 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-22T18:49:40,862 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-22T18:49:40,863 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-22T18:49:40,889 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-22T18:49:40,898 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44645-0x10141033e850000, quorum=127.0.0.1:49841, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-22T18:49:40,900 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-22T18:49:40,902 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-22T18:49:40,903 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44645-0x10141033e850000, quorum=127.0.0.1:49841, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-22T18:49:40,905 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-22T18:49:40,907 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-22T18:49:40,911 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44645-0x10141033e850000, quorum=127.0.0.1:49841, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-22T18:49:40,913 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-22T18:49:40,914 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44645-0x10141033e850000, quorum=127.0.0.1:49841, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-22T18:49:40,922 
DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-22T18:49:40,940 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44645-0x10141033e850000, quorum=127.0.0.1:49841, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-22T18:49:40,942 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-22T18:49:40,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44645-0x10141033e850000, quorum=127.0.0.1:49841, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T18:49:40,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34657-0x10141033e850001, quorum=127.0.0.1:49841, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T18:49:40,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44645-0x10141033e850000, quorum=127.0.0.1:49841, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:49:40,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34657-0x10141033e850001, quorum=127.0.0.1:49841, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:49:40,948 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=d79ba0c344fb,44645,1732301379428, sessionid=0x10141033e850000, setting cluster-up flag (Was=false) 2024-11-22T18:49:40,968 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44645-0x10141033e850000, quorum=127.0.0.1:49841, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:49:40,968 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34657-0x10141033e850001, quorum=127.0.0.1:49841, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:49:40,974 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-22T18:49:40,976 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=d79ba0c344fb,44645,1732301379428 2024-11-22T18:49:40,982 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44645-0x10141033e850000, quorum=127.0.0.1:49841, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:49:40,982 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34657-0x10141033e850001, quorum=127.0.0.1:49841, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:49:40,990 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-22T18:49:40,991 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=d79ba0c344fb,44645,1732301379428 2024-11-22T18:49:40,997 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-22T18:49:41,060 INFO [RS:0;d79ba0c344fb:34657 {}] regionserver.HRegionServer(746): ClusterId : 95d609ae-2aa4-4ef7-9c70-48187225b38e 2024-11-22T18:49:41,063 DEBUG [RS:0;d79ba0c344fb:34657 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-22T18:49:41,068 DEBUG [RS:0;d79ba0c344fb:34657 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-22T18:49:41,068 DEBUG [RS:0;d79ba0c344fb:34657 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-22T18:49:41,071 DEBUG [RS:0;d79ba0c344fb:34657 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-22T18:49:41,072 DEBUG [RS:0;d79ba0c344fb:34657 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e794201, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d79ba0c344fb/172.17.0.2:0 2024-11-22T18:49:41,081 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-22T18:49:41,089 DEBUG [RS:0;d79ba0c344fb:34657 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;d79ba0c344fb:34657 2024-11-22T18:49:41,091 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-22T18:49:41,092 INFO [RS:0;d79ba0c344fb:34657 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-22T18:49:41,092 INFO [RS:0;d79ba0c344fb:34657 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-22T18:49:41,092 DEBUG [RS:0;d79ba0c344fb:34657 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-22T18:49:41,095 INFO [RS:0;d79ba0c344fb:34657 {}] regionserver.HRegionServer(2659): reportForDuty to master=d79ba0c344fb,44645,1732301379428 with port=34657, startcode=1732301380157 2024-11-22T18:49:41,097 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
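The FlushLargeStoresPolicy DEBUG entry at 18:49:40,754 explains where its "(32.0 M)" figure comes from: with no hbase.hregion.percolumnfamilyflush.size.lower.bound in the master:store descriptor, the per-family flush lower bound falls back to the region memstore flush size divided by the number of column families. Worked out from values already logged, flushSize=134217728 bytes spread over the four families info, proc, rs and state gives 134217728 / 4 = 33554432 bytes = 32 MB, which matches the flushSizeLowerBound=33554432 reported when the region opens at 18:49:40,764.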
2024-11-22T18:49:41,103 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: d79ba0c344fb,44645,1732301379428 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-22T18:49:41,108 DEBUG [RS:0;d79ba0c344fb:34657 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-22T18:49:41,110 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/d79ba0c344fb:0, corePoolSize=5, maxPoolSize=5 2024-11-22T18:49:41,110 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/d79ba0c344fb:0, corePoolSize=5, maxPoolSize=5 2024-11-22T18:49:41,110 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/d79ba0c344fb:0, corePoolSize=5, maxPoolSize=5 2024-11-22T18:49:41,110 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/d79ba0c344fb:0, corePoolSize=5, maxPoolSize=5 2024-11-22T18:49:41,110 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/d79ba0c344fb:0, corePoolSize=10, maxPoolSize=10 2024-11-22T18:49:41,111 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:49:41,111 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/d79ba0c344fb:0, corePoolSize=2, maxPoolSize=2 2024-11-22T18:49:41,111 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:49:41,112 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732301411112 2024-11-22T18:49:41,114 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-22T18:49:41,115 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-22T18:49:41,116 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T18:49:41,116 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-22T18:49:41,118 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-22T18:49:41,119 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-22T18:49:41,119 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-22T18:49:41,119 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-22T18:49:41,120 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T18:49:41,123 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-22T18:49:41,123 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:49:41,123 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-22T18:49:41,124 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-22T18:49:41,125 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-22T18:49:41,127 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-22T18:49:41,127 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-22T18:49:41,129 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/d79ba0c344fb:0:becomeActiveMaster-HFileCleaner.large.0-1732301381129,5,FailOnTimeoutGroup] 
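The hbase:meta table descriptor written at 18:49:41,123 is dumped above in its flattened string form. As a rough illustration of how the same column-family attributes are expressed through the public client API, here is a hedged sketch using ColumnFamilyDescriptorBuilder/TableDescriptorBuilder; the table name "demo" is a placeholder (hbase:meta itself is system-managed) and only the 'info' family is shown:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeDescriptor {
        public static void main(String[] args) {
            // Mirrors the 'info' family attributes in the dump above:
            // VERSIONS=3, ROW_INDEX_V1 encoding, ROWCOL bloom, in-memory, 8 KB blocks.
            ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes("info"))
                    .setMaxVersions(3)
                    .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                    .setBloomFilterType(BloomType.ROWCOL)
                    .setInMemory(true)
                    .setBlocksize(8192)
                    .build();

            // "demo" is a placeholder table name, not the system table.
            TableDescriptor table = TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("demo"))
                    .setColumnFamily(info)
                    .build();

            System.out.println(table);
        }
    }

The builder calls correspond one-to-one to the attributes printed in the descriptor dump: VERSIONS, DATA_BLOCK_ENCODING, BLOOMFILTER, IN_MEMORY and BLOCKSIZE.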
2024-11-22T18:49:41,130 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/d79ba0c344fb:0:becomeActiveMaster-HFileCleaner.small.0-1732301381129,5,FailOnTimeoutGroup] 2024-11-22T18:49:41,130 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T18:49:41,130 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-22T18:49:41,131 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-22T18:49:41,132 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-22T18:49:41,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46333 is added to blk_1073741831_1007 (size=1321) 2024-11-22T18:49:41,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46673 is added to blk_1073741831_1007 (size=1321) 2024-11-22T18:49:41,140 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-22T18:49:41,141 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c 2024-11-22T18:49:41,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:46333 is added to blk_1073741832_1008 (size=32) 2024-11-22T18:49:41,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46673 is added to blk_1073741832_1008 (size=32) 2024-11-22T18:49:41,158 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T18:49:41,161 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T18:49:41,164 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T18:49:41,164 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:49:41,165 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:49:41,165 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T18:49:41,168 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T18:49:41,168 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:49:41,169 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:49:41,169 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T18:49:41,172 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T18:49:41,172 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:49:41,173 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:49:41,174 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T18:49:41,176 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T18:49:41,176 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:49:41,177 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:49:41,177 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T18:49:41,179 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/hbase/meta/1588230740 2024-11-22T18:49:41,180 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/hbase/meta/1588230740 2024-11-22T18:49:41,182 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T18:49:41,182 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T18:49:41,183 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39487, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-22T18:49:41,183 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T18:49:41,186 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T18:49:41,189 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44645 {}] master.ServerManager(363): Checking decommissioned status of RegionServer d79ba0c344fb,34657,1732301380157 2024-11-22T18:49:41,190 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T18:49:41,190 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=785362, jitterRate=-0.001360580325126648}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T18:49:41,192 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44645 {}] master.ServerManager(517): Registering regionserver=d79ba0c344fb,34657,1732301380157 2024-11-22T18:49:41,193 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732301381158Initializing all the Stores at 1732301381160 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732301381160Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732301381161 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732301381161Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => 
'0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732301381161Cleaning up temporary data from old regions at 1732301381183 (+22 ms)Region opened successfully at 1732301381193 (+10 ms) 2024-11-22T18:49:41,193 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T18:49:41,193 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T18:49:41,193 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T18:49:41,193 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T18:49:41,193 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T18:49:41,194 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T18:49:41,195 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732301381193Disabling compacts and flushes for region at 1732301381193Disabling writes for close at 1732301381193Writing region close event to WAL at 1732301381194 (+1 ms)Closed at 1732301381194 2024-11-22T18:49:41,199 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T18:49:41,199 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-22T18:49:41,206 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-22T18:49:41,210 DEBUG [RS:0;d79ba0c344fb:34657 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c 2024-11-22T18:49:41,210 DEBUG [RS:0;d79ba0c344fb:34657 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44137 2024-11-22T18:49:41,210 DEBUG [RS:0;d79ba0c344fb:34657 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-22T18:49:41,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44645-0x10141033e850000, quorum=127.0.0.1:49841, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T18:49:41,215 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T18:49:41,215 DEBUG [RS:0;d79ba0c344fb:34657 {}] zookeeper.ZKUtil(111): regionserver:34657-0x10141033e850001, quorum=127.0.0.1:49841, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/d79ba0c344fb,34657,1732301380157 2024-11-22T18:49:41,215 WARN [RS:0;d79ba0c344fb:34657 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-22T18:49:41,216 INFO [RS:0;d79ba0c344fb:34657 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T18:49:41,216 DEBUG [RS:0;d79ba0c344fb:34657 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/WALs/d79ba0c344fb,34657,1732301380157 2024-11-22T18:49:41,218 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-22T18:49:41,218 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [d79ba0c344fb,34657,1732301380157] 2024-11-22T18:49:41,242 INFO [RS:0;d79ba0c344fb:34657 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-22T18:49:41,254 INFO [RS:0;d79ba0c344fb:34657 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-22T18:49:41,259 INFO [RS:0;d79ba0c344fb:34657 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-22T18:49:41,259 INFO [RS:0;d79ba0c344fb:34657 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T18:49:41,260 INFO [RS:0;d79ba0c344fb:34657 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-22T18:49:41,266 INFO [RS:0;d79ba0c344fb:34657 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-22T18:49:41,267 INFO [RS:0;d79ba0c344fb:34657 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
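The MemStoreFlusher line at 18:49:41,254 pairs globalMemStoreLimit=880 M with globalMemStoreLimitLowMark=836 M; the arithmetic behind the pair is simply 880 MB x 0.95 = 836 MB, i.e. the low-water mark here is 95% of the global limit (presumably the default lower-limit fraction).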
2024-11-22T18:49:41,267 DEBUG [RS:0;d79ba0c344fb:34657 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:49:41,267 DEBUG [RS:0;d79ba0c344fb:34657 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:49:41,268 DEBUG [RS:0;d79ba0c344fb:34657 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:49:41,268 DEBUG [RS:0;d79ba0c344fb:34657 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:49:41,268 DEBUG [RS:0;d79ba0c344fb:34657 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:49:41,268 DEBUG [RS:0;d79ba0c344fb:34657 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/d79ba0c344fb:0, corePoolSize=2, maxPoolSize=2 2024-11-22T18:49:41,268 DEBUG [RS:0;d79ba0c344fb:34657 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:49:41,268 DEBUG [RS:0;d79ba0c344fb:34657 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:49:41,268 DEBUG [RS:0;d79ba0c344fb:34657 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:49:41,269 DEBUG [RS:0;d79ba0c344fb:34657 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:49:41,269 DEBUG [RS:0;d79ba0c344fb:34657 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:49:41,269 DEBUG [RS:0;d79ba0c344fb:34657 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:49:41,269 DEBUG [RS:0;d79ba0c344fb:34657 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/d79ba0c344fb:0, corePoolSize=3, maxPoolSize=3 2024-11-22T18:49:41,269 DEBUG [RS:0;d79ba0c344fb:34657 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/d79ba0c344fb:0, corePoolSize=3, maxPoolSize=3 2024-11-22T18:49:41,270 INFO [RS:0;d79ba0c344fb:34657 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T18:49:41,270 INFO [RS:0;d79ba0c344fb:34657 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T18:49:41,270 INFO [RS:0;d79ba0c344fb:34657 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T18:49:41,270 INFO [RS:0;d79ba0c344fb:34657 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-22T18:49:41,271 INFO [RS:0;d79ba0c344fb:34657 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-22T18:49:41,271 INFO [RS:0;d79ba0c344fb:34657 {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,34657,1732301380157-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T18:49:41,289 INFO [RS:0;d79ba0c344fb:34657 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-22T18:49:41,291 INFO [RS:0;d79ba0c344fb:34657 {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,34657,1732301380157-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T18:49:41,291 INFO [RS:0;d79ba0c344fb:34657 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T18:49:41,291 INFO [RS:0;d79ba0c344fb:34657 {}] regionserver.Replication(171): d79ba0c344fb,34657,1732301380157 started 2024-11-22T18:49:41,313 INFO [RS:0;d79ba0c344fb:34657 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T18:49:41,313 INFO [RS:0;d79ba0c344fb:34657 {}] regionserver.HRegionServer(1482): Serving as d79ba0c344fb,34657,1732301380157, RpcServer on d79ba0c344fb/172.17.0.2:34657, sessionid=0x10141033e850001 2024-11-22T18:49:41,314 DEBUG [RS:0;d79ba0c344fb:34657 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-22T18:49:41,315 DEBUG [RS:0;d79ba0c344fb:34657 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager d79ba0c344fb,34657,1732301380157 2024-11-22T18:49:41,315 DEBUG [RS:0;d79ba0c344fb:34657 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd79ba0c344fb,34657,1732301380157' 2024-11-22T18:49:41,315 DEBUG [RS:0;d79ba0c344fb:34657 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-22T18:49:41,316 DEBUG [RS:0;d79ba0c344fb:34657 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-22T18:49:41,317 DEBUG [RS:0;d79ba0c344fb:34657 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-22T18:49:41,317 DEBUG [RS:0;d79ba0c344fb:34657 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-22T18:49:41,317 DEBUG [RS:0;d79ba0c344fb:34657 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager d79ba0c344fb,34657,1732301380157 2024-11-22T18:49:41,318 DEBUG [RS:0;d79ba0c344fb:34657 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd79ba0c344fb,34657,1732301380157' 2024-11-22T18:49:41,318 DEBUG [RS:0;d79ba0c344fb:34657 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-22T18:49:41,318 DEBUG [RS:0;d79ba0c344fb:34657 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-22T18:49:41,319 DEBUG [RS:0;d79ba0c344fb:34657 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-22T18:49:41,319 INFO [RS:0;d79ba0c344fb:34657 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-22T18:49:41,319 INFO [RS:0;d79ba0c344fb:34657 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-22T18:49:41,369 WARN [d79ba0c344fb:44645 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-22T18:49:41,428 INFO [RS:0;d79ba0c344fb:34657 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d79ba0c344fb%2C34657%2C1732301380157, suffix=, logDir=hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/WALs/d79ba0c344fb,34657,1732301380157, archiveDir=hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/oldWALs, maxLogs=32 2024-11-22T18:49:41,430 INFO [RS:0;d79ba0c344fb:34657 {}] monitor.StreamSlowMonitor(122): New stream slow monitor d79ba0c344fb%2C34657%2C1732301380157.1732301381430 2024-11-22T18:49:41,439 INFO [RS:0;d79ba0c344fb:34657 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/WALs/d79ba0c344fb,34657,1732301380157/d79ba0c344fb%2C34657%2C1732301380157.1732301381430 2024-11-22T18:49:41,440 DEBUG [RS:0;d79ba0c344fb:34657 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46595:46595),(127.0.0.1/127.0.0.1:35081:35081)] 2024-11-22T18:49:41,621 DEBUG [d79ba0c344fb:44645 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-22T18:49:41,633 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=d79ba0c344fb,34657,1732301380157 2024-11-22T18:49:41,641 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as d79ba0c344fb,34657,1732301380157, state=OPENING 2024-11-22T18:49:41,647 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-22T18:49:41,649 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44645-0x10141033e850000, quorum=127.0.0.1:49841, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:49:41,649 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34657-0x10141033e850001, quorum=127.0.0.1:49841, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:49:41,650 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T18:49:41,650 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T18:49:41,651 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T18:49:41,653 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=d79ba0c344fb,34657,1732301380157}] 2024-11-22T18:49:41,828 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-22T18:49:41,831 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53703, 
version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-22T18:49:41,844 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-22T18:49:41,844 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T18:49:41,848 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d79ba0c344fb%2C34657%2C1732301380157.meta, suffix=.meta, logDir=hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/WALs/d79ba0c344fb,34657,1732301380157, archiveDir=hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/oldWALs, maxLogs=32 2024-11-22T18:49:41,849 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor d79ba0c344fb%2C34657%2C1732301380157.meta.1732301381849.meta 2024-11-22T18:49:41,857 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/WALs/d79ba0c344fb,34657,1732301380157/d79ba0c344fb%2C34657%2C1732301380157.meta.1732301381849.meta 2024-11-22T18:49:41,860 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46595:46595),(127.0.0.1/127.0.0.1:35081:35081)] 2024-11-22T18:49:41,861 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-22T18:49:41,862 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-22T18:49:41,865 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-22T18:49:41,870 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
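
The "WAL configuration: blocksize=256 MB, rollsize=128 MB, ..., maxLogs=32" lines above (one for the region server's default WAL, one for the meta WAL with the .meta suffix) are governed by a handful of standard HBase settings. The sketch below is only an illustration of those keys, not the test's actual setup code; the property names are the usual HBase ones, but defaults and exact roll behaviour can differ between versions, and the class name WalConfigSketch is made up for this example.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalConfigSketch {
      public static Configuration walTuning() {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.wal.provider", "filesystem");                          // FSHLogProvider, as instantiated above
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // blocksize=256 MB
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);          // rollsize = blocksize * 0.5 = 128 MB
        conf.setInt("hbase.regionserver.maxlogs", 32);                         // maxLogs=32
        return conf;
      }
    }
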
2024-11-22T18:49:41,874 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-22T18:49:41,875 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T18:49:41,875 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-22T18:49:41,876 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-22T18:49:41,879 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T18:49:41,880 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T18:49:41,881 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:49:41,881 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:49:41,882 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T18:49:41,883 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T18:49:41,883 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:49:41,884 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:49:41,884 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T18:49:41,885 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T18:49:41,886 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:49:41,886 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:49:41,886 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T18:49:41,888 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T18:49:41,888 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:49:41,888 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
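
Each of the four hbase:meta column families (info, ns, rep_barrier, table) logs the same CompactionConfiguration line above. Those numbers correspond to well-known compaction properties; the snippet below simply reads them back from a Configuration to show the mapping. The property names are the standard ones, and the fallback values shown are just the values visible in this log (for example 2684354560 bytes = 2.5 GB throttle point and 604800000 ms = 7 days major period); CompactionConfigSketch is a made-up name.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        System.out.println(conf.getInt("hbase.hstore.compaction.min", 3));                        // minFilesToCompact:3
        System.out.println(conf.getInt("hbase.hstore.compaction.max", 10));                       // maxFilesToCompact:10
        System.out.println(conf.getFloat("hbase.hstore.compaction.ratio", 1.2f));                 // ratio 1.200000
        System.out.println(conf.getFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f));         // off-peak ratio 5.000000
        System.out.println(conf.getLong("hbase.regionserver.thread.compaction.throttle", 2684354560L)); // throttle point
        System.out.println(conf.getLong("hbase.hregion.majorcompaction", 604800000L));            // major period (ms)
        System.out.println(conf.getFloat("hbase.hregion.majorcompaction.jitter", 0.5f));          // major jitter
      }
    }
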
2024-11-22T18:49:41,889 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T18:49:41,890 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/hbase/meta/1588230740 2024-11-22T18:49:41,892 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/hbase/meta/1588230740 2024-11-22T18:49:41,894 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T18:49:41,894 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T18:49:41,895 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T18:49:41,898 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T18:49:41,899 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=741995, jitterRate=-0.05650573968887329}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T18:49:41,899 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-22T18:49:41,901 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732301381876Writing region info on filesystem at 1732301381876Initializing all the Stores at 1732301381878 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732301381878Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732301381879 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732301381879Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732301381879Cleaning up temporary data from old regions at 1732301381894 (+15 ms)Running coprocessor post-open hooks at 1732301381900 (+6 ms)Region opened successfully at 1732301381901 (+1 ms) 2024-11-22T18:49:41,908 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732301381819 2024-11-22T18:49:41,920 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-22T18:49:41,920 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-22T18:49:41,922 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=d79ba0c344fb,34657,1732301380157 2024-11-22T18:49:41,924 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as d79ba0c344fb,34657,1732301380157, state=OPEN 2024-11-22T18:49:41,929 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34657-0x10141033e850001, quorum=127.0.0.1:49841, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T18:49:41,929 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44645-0x10141033e850000, quorum=127.0.0.1:49841, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T18:49:41,929 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T18:49:41,929 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T18:49:41,929 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=d79ba0c344fb,34657,1732301380157 2024-11-22T18:49:41,935 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-22T18:49:41,935 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=d79ba0c344fb,34657,1732301380157 in 277 msec 2024-11-22T18:49:41,942 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-22T18:49:41,942 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 732 msec 2024-11-22T18:49:41,944 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T18:49:41,944 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-22T18:49:41,964 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T18:49:41,966 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=d79ba0c344fb,34657,1732301380157, seqNum=-1] 2024-11-22T18:49:41,987 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T18:49:41,989 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49763, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T18:49:42,009 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 979 msec 2024-11-22T18:49:42,010 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732301382010, completionTime=-1 2024-11-22T18:49:42,012 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-22T18:49:42,012 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-22T18:49:42,042 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-22T18:49:42,042 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732301442042 2024-11-22T18:49:42,042 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732301502042 2024-11-22T18:49:42,042 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 29 msec 2024-11-22T18:49:42,045 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,44645,1732301379428-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T18:49:42,045 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,44645,1732301379428-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T18:49:42,045 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,44645,1732301379428-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T18:49:42,047 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-d79ba0c344fb:44645, period=300000, unit=MILLISECONDS is enabled. 
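
InitMetaProcedure above has just created the 'default' and 'hbase' namespaces and the master has finished waiting for its single region server. A minimal client-side check of that state is sketched below; it assumes an already-open Connection named conn to this cluster, and NamespaceCheckSketch is an invented wrapper, but listNamespaceDescriptors() is the standard Admin call.

    import java.io.IOException;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    public class NamespaceCheckSketch {
      static void printNamespaces(Connection conn) throws IOException {
        try (Admin admin = conn.getAdmin()) {
          for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
            System.out.println(ns.getName());   // expect "default" and "hbase" after InitMetaProcedure
          }
        }
      }
    }
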
2024-11-22T18:49:42,047 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-22T18:49:42,048 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-22T18:49:42,053 DEBUG [master/d79ba0c344fb:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-22T18:49:42,074 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.764sec 2024-11-22T18:49:42,075 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-22T18:49:42,077 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-22T18:49:42,078 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-22T18:49:42,078 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-22T18:49:42,078 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-22T18:49:42,079 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,44645,1732301379428-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T18:49:42,080 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,44645,1732301379428-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-22T18:49:42,088 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-22T18:49:42,089 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-22T18:49:42,090 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,44645,1732301379428-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
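
At this point the master has completed initialization ("Master has completed initialization 1.764sec" above), and the lines that follow show the test client connecting and "Minicluster is up". The sketch below is the kind of harness call that produces this whole startup sequence: HBaseTestingUtil is the test utility the log itself references, and the default single region server matches "expected min=1 server(s)" above. It is an illustrative sketch under those assumptions, not the test's actual code.

    import org.apache.hadoop.hbase.HBaseTestingUtil;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        util.startMiniCluster();        // defaults to one master and one region server, as in this log
        try {
          // ... run the log-rolling test logic against util.getConnection() ...
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }
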
2024-11-22T18:49:42,170 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e87ad9c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T18:49:42,173 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-22T18:49:42,173 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-22T18:49:42,176 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request d79ba0c344fb,44645,-1 for getting cluster id 2024-11-22T18:49:42,179 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T18:49:42,188 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '95d609ae-2aa4-4ef7-9c70-48187225b38e' 2024-11-22T18:49:42,191 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T18:49:42,191 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "95d609ae-2aa4-4ef7-9c70-48187225b38e" 2024-11-22T18:49:42,194 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1236d106, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T18:49:42,194 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [d79ba0c344fb,44645,-1] 2024-11-22T18:49:42,196 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T18:49:42,198 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T18:49:42,200 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39972, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T18:49:42,203 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6dd7b3c3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T18:49:42,203 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T18:49:42,210 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=d79ba0c344fb,34657,1732301380157, seqNum=-1] 2024-11-22T18:49:42,211 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T18:49:42,213 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39812, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T18:49:42,242 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=d79ba0c344fb,44645,1732301379428 2024-11-22T18:49:42,242 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T18:49:42,253 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-22T18:49:42,258 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-22T18:49:42,263 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is d79ba0c344fb,44645,1732301379428 2024-11-22T18:49:42,266 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@52c3205e 2024-11-22T18:49:42,267 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-22T18:49:42,270 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39978, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-22T18:49:42,271 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44645 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-22T18:49:42,272 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44645 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
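
The two TableDescriptorChecker warnings above are expected here: the test deliberately uses a tiny region max file size (786432 bytes) and memstore flush size (8192 bytes) so that flushes and log rolls happen quickly. Below is a hedged sketch of a table descriptor that would trigger exactly those warnings, plus the create request that follows (a single 'info' family, everything else default). It assumes an open Admin named admin; CreateTestTableSketch is an invented wrapper, not the test's actual code.

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CreateTestTableSketch {
      static void createTable(Admin admin) throws IOException {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info")) // the lone 'info' family created below
            .setMaxFileSize(786432L)       // MAX_FILESIZE: the "over splitting" warning above
            .setMemStoreFlushSize(8192L)   // MEMSTORE_FLUSHSIZE: the "very frequent flushing" warning above
            .build();
        admin.createTable(td);             // drives CreateTableProcedure pid=4 below
      }
    }
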
2024-11-22T18:49:42,275 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44645 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T18:49:42,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44645 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-22T18:49:42,286 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-22T18:49:42,288 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44645 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-22T18:49:42,288 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:49:42,291 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-22T18:49:42,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44645 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-22T18:49:42,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46673 is added to blk_1073741835_1011 (size=389) 2024-11-22T18:49:42,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46333 is added to blk_1073741835_1011 (size=389) 2024-11-22T18:49:42,320 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => fa520453eee3fe92eb0889da491d4200, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1732301382271.fa520453eee3fe92eb0889da491d4200.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c 2024-11-22T18:49:42,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46333 is added to blk_1073741836_1012 (size=72) 2024-11-22T18:49:42,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46673 is added to blk_1073741836_1012 (size=72) 2024-11-22T18:49:42,332 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1732301382271.fa520453eee3fe92eb0889da491d4200.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T18:49:42,332 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing fa520453eee3fe92eb0889da491d4200, disabling compactions & flushes 2024-11-22T18:49:42,332 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1732301382271.fa520453eee3fe92eb0889da491d4200. 2024-11-22T18:49:42,332 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1732301382271.fa520453eee3fe92eb0889da491d4200. 2024-11-22T18:49:42,333 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1732301382271.fa520453eee3fe92eb0889da491d4200. after waiting 0 ms 2024-11-22T18:49:42,333 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1732301382271.fa520453eee3fe92eb0889da491d4200. 2024-11-22T18:49:42,333 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1732301382271.fa520453eee3fe92eb0889da491d4200. 2024-11-22T18:49:42,333 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for fa520453eee3fe92eb0889da491d4200: Waiting for close lock at 1732301382332Disabling compacts and flushes for region at 1732301382332Disabling writes for close at 1732301382333 (+1 ms)Writing region close event to WAL at 1732301382333Closed at 1732301382333 2024-11-22T18:49:42,335 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-22T18:49:42,357 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1732301382271.fa520453eee3fe92eb0889da491d4200.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1732301382335"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732301382335"}]},"ts":"1732301382335"} 2024-11-22T18:49:42,366 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-22T18:49:42,368 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-22T18:49:42,372 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732301382369"}]},"ts":"1732301382369"} 2024-11-22T18:49:42,377 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-22T18:49:42,379 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=fa520453eee3fe92eb0889da491d4200, ASSIGN}] 2024-11-22T18:49:42,381 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=fa520453eee3fe92eb0889da491d4200, ASSIGN 2024-11-22T18:49:42,384 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=fa520453eee3fe92eb0889da491d4200, ASSIGN; state=OFFLINE, location=d79ba0c344fb,34657,1732301380157; forceNewPlan=false, retain=false 2024-11-22T18:49:42,535 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=fa520453eee3fe92eb0889da491d4200, regionState=OPENING, regionLocation=d79ba0c344fb,34657,1732301380157 2024-11-22T18:49:42,540 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=fa520453eee3fe92eb0889da491d4200, ASSIGN because future has completed 2024-11-22T18:49:42,541 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure fa520453eee3fe92eb0889da491d4200, server=d79ba0c344fb,34657,1732301380157}] 2024-11-22T18:49:42,702 INFO [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1732301382271.fa520453eee3fe92eb0889da491d4200. 
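
The assignment sub-procedures (pid=5 and pid=6) have now been dispatched and the region server is about to open fa520453eee3fe92eb0889da491d4200, as the following lines show. A test would normally just block until the table is usable rather than watch the procedures; a minimal way to do that is sketched below, assuming the HBaseTestingUtil instance (here called util) keeps the waitTableAvailable/getAdmin helpers of the older HBaseTestingUtility, and WaitForTableSketch is an invented name.

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.RegionInfo;

    public class WaitForTableSketch {
      static List<RegionInfo> waitForTestTable(HBaseTestingUtil util) throws IOException, InterruptedException {
        TableName tn = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
        util.waitTableAvailable(tn);            // returns once the ASSIGN above has completed
        return util.getAdmin().getRegions(tn);  // one region; encoded name fa520453eee3fe92eb0889da491d4200 in this run
      }
    }
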
2024-11-22T18:49:42,702 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => fa520453eee3fe92eb0889da491d4200, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1732301382271.fa520453eee3fe92eb0889da491d4200.', STARTKEY => '', ENDKEY => ''} 2024-11-22T18:49:42,703 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling fa520453eee3fe92eb0889da491d4200 2024-11-22T18:49:42,703 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1732301382271.fa520453eee3fe92eb0889da491d4200.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T18:49:42,703 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for fa520453eee3fe92eb0889da491d4200 2024-11-22T18:49:42,703 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for fa520453eee3fe92eb0889da491d4200 2024-11-22T18:49:42,705 INFO [StoreOpener-fa520453eee3fe92eb0889da491d4200-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region fa520453eee3fe92eb0889da491d4200 2024-11-22T18:49:42,708 INFO [StoreOpener-fa520453eee3fe92eb0889da491d4200-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fa520453eee3fe92eb0889da491d4200 columnFamilyName info 2024-11-22T18:49:42,708 DEBUG [StoreOpener-fa520453eee3fe92eb0889da491d4200-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:49:42,709 INFO [StoreOpener-fa520453eee3fe92eb0889da491d4200-1 {}] regionserver.HStore(327): Store=fa520453eee3fe92eb0889da491d4200/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T18:49:42,709 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for fa520453eee3fe92eb0889da491d4200 2024-11-22T18:49:42,710 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/default/TestLogRolling-testSlowSyncLogRolling/fa520453eee3fe92eb0889da491d4200 2024-11-22T18:49:42,711 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/default/TestLogRolling-testSlowSyncLogRolling/fa520453eee3fe92eb0889da491d4200 2024-11-22T18:49:42,712 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for fa520453eee3fe92eb0889da491d4200 2024-11-22T18:49:42,712 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for fa520453eee3fe92eb0889da491d4200 2024-11-22T18:49:42,714 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for fa520453eee3fe92eb0889da491d4200 2024-11-22T18:49:42,717 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/default/TestLogRolling-testSlowSyncLogRolling/fa520453eee3fe92eb0889da491d4200/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T18:49:42,718 INFO [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened fa520453eee3fe92eb0889da491d4200; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=876470, jitterRate=0.11448976397514343}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T18:49:42,718 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for fa520453eee3fe92eb0889da491d4200 2024-11-22T18:49:42,719 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for fa520453eee3fe92eb0889da491d4200: Running coprocessor pre-open hook at 1732301382703Writing region info on filesystem at 1732301382703Initializing all the Stores at 1732301382705 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732301382705Cleaning up temporary data from old regions at 1732301382712 (+7 ms)Running coprocessor post-open hooks at 1732301382718 (+6 ms)Region opened successfully at 1732301382719 (+1 ms) 2024-11-22T18:49:42,721 INFO [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1732301382271.fa520453eee3fe92eb0889da491d4200., pid=6, masterSystemTime=1732301382695 2024-11-22T18:49:42,725 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1732301382271.fa520453eee3fe92eb0889da491d4200. 2024-11-22T18:49:42,725 INFO [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1732301382271.fa520453eee3fe92eb0889da491d4200. 2024-11-22T18:49:42,726 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=fa520453eee3fe92eb0889da491d4200, regionState=OPEN, openSeqNum=2, regionLocation=d79ba0c344fb,34657,1732301380157 2024-11-22T18:49:42,730 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure fa520453eee3fe92eb0889da491d4200, server=d79ba0c344fb,34657,1732301380157 because future has completed 2024-11-22T18:49:42,736 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-22T18:49:42,736 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure fa520453eee3fe92eb0889da491d4200, server=d79ba0c344fb,34657,1732301380157 in 191 msec 2024-11-22T18:49:42,740 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-22T18:49:42,740 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=fa520453eee3fe92eb0889da491d4200, ASSIGN in 357 msec 2024-11-22T18:49:42,741 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-22T18:49:42,742 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732301382741"}]},"ts":"1732301382741"} 2024-11-22T18:49:42,745 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-22T18:49:42,746 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-22T18:49:42,749 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 468 msec 2024-11-22T18:49:47,378 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-22T18:49:47,421 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-22T18:49:47,423 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-22T18:49:49,873 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-22T18:49:49,874 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-22T18:49:49,875 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-22T18:49:49,876 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-22T18:49:49,877 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T18:49:49,877 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-22T18:49:49,877 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-22T18:49:49,877 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-22T18:49:52,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44645 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-22T18:49:52,398 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-22T18:49:52,401 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-22T18:49:52,408 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-22T18:49:52,409 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1732301382271.fa520453eee3fe92eb0889da491d4200. 
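
The CREATE operation has completed above and the client has located the table's single region. What follows in the log is the test driving the WAL: a manual roll ("Rolled WAL ... with entries=1"), then a batch of roughly 1 KB rows starting at row0001 whose flush shows up as "dataSize=7.36 KB ... sequenceid=11", and eventually a "Slow sync cost" warning. Below is a hedged sketch of that client-side activity, assuming a Connection named conn to the mini cluster; the row count, payload size, and WalRollAndWriteSketch name are illustrative, and the empty column qualifier matches the "row0001/info:/..." keys in the flush output.

    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WalRollAndWriteSketch {
      static void rollAndWrite(Connection conn) throws Exception {
        TableName tn = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
        try (Admin admin = conn.getAdmin(); Table table = conn.getTable(tn)) {
          ServerName rs = admin.getRegionServers().iterator().next();  // the lone region server
          admin.rollWALWriter(rs);                                     // produces the "Rolled WAL" lines
          byte[] value = new byte[1024];                               // ~1 KB per cell
          for (int i = 1; i <= 7; i++) {                               // a handful of rows, like the 7.36 KB flush
            Put put = new Put(Bytes.toBytes(String.format("row%04d", i)));
            put.addColumn(Bytes.toBytes("info"), Bytes.toBytes(""), value);
            table.put(put);
          }
        }
      }
    }
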
2024-11-22T18:49:52,410 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor d79ba0c344fb%2C34657%2C1732301380157.1732301392409 2024-11-22T18:49:52,419 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:49:52,420 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:49:52,420 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:49:52,420 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:49:52,420 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:49:52,421 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/WALs/d79ba0c344fb,34657,1732301380157/d79ba0c344fb%2C34657%2C1732301380157.1732301381430 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/WALs/d79ba0c344fb,34657,1732301380157/d79ba0c344fb%2C34657%2C1732301380157.1732301392409 2024-11-22T18:49:52,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46333 is added to blk_1073741833_1009 (size=451) 2024-11-22T18:49:52,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46673 is added to blk_1073741833_1009 (size=451) 2024-11-22T18:49:52,427 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35081:35081),(127.0.0.1/127.0.0.1:46595:46595)] 2024-11-22T18:49:52,427 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/WALs/d79ba0c344fb,34657,1732301380157/d79ba0c344fb%2C34657%2C1732301380157.1732301381430 to hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/oldWALs/d79ba0c344fb%2C34657%2C1732301380157.1732301381430 2024-11-22T18:49:52,436 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1732301382271.fa520453eee3fe92eb0889da491d4200., hostname=d79ba0c344fb,34657,1732301380157, seqNum=2] 2024-11-22T18:50:04,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34657 {}] regionserver.HRegion(8855): Flush requested on fa520453eee3fe92eb0889da491d4200 2024-11-22T18:50:04,472 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing fa520453eee3fe92eb0889da491d4200 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T18:50:04,538 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/default/TestLogRolling-testSlowSyncLogRolling/fa520453eee3fe92eb0889da491d4200/.tmp/info/f2469bd225bc4de5915cc2d3269f0770 is 1080, key is row0001/info:/1732301392439/Put/seqid=0 2024-11-22T18:50:04,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46333 is added to blk_1073741838_1014 (size=12509) 2024-11-22T18:50:04,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46673 is added to blk_1073741838_1014 (size=12509) 2024-11-22T18:50:04,552 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/default/TestLogRolling-testSlowSyncLogRolling/fa520453eee3fe92eb0889da491d4200/.tmp/info/f2469bd225bc4de5915cc2d3269f0770 2024-11-22T18:50:04,599 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/default/TestLogRolling-testSlowSyncLogRolling/fa520453eee3fe92eb0889da491d4200/.tmp/info/f2469bd225bc4de5915cc2d3269f0770 as hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/default/TestLogRolling-testSlowSyncLogRolling/fa520453eee3fe92eb0889da491d4200/info/f2469bd225bc4de5915cc2d3269f0770 2024-11-22T18:50:04,610 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/default/TestLogRolling-testSlowSyncLogRolling/fa520453eee3fe92eb0889da491d4200/info/f2469bd225bc4de5915cc2d3269f0770, entries=7, sequenceid=11, filesize=12.2 K 2024-11-22T18:50:04,617 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for fa520453eee3fe92eb0889da491d4200 in 145ms, sequenceid=11, compaction requested=false 2024-11-22T18:50:04,618 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for fa520453eee3fe92eb0889da491d4200: 2024-11-22T18:50:08,489 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-22T18:50:12,481 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor d79ba0c344fb%2C34657%2C1732301380157.1732301412481 2024-11-22T18:50:12,690 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 206 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46333,DS-dcd31298-4d5f-4dc2-b56e-037a47ef4cdd,DISK], DatanodeInfoWithStorage[127.0.0.1:46673,DS-9469d354-99cd-4bc5-8079-98b94f6baea7,DISK]] 2024-11-22T18:50:12,690 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:50:12,691 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:50:12,691 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:50:12,691 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:50:12,691 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:50:12,691 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/WALs/d79ba0c344fb,34657,1732301380157/d79ba0c344fb%2C34657%2C1732301380157.1732301392409 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/WALs/d79ba0c344fb,34657,1732301380157/d79ba0c344fb%2C34657%2C1732301380157.1732301412481 2024-11-22T18:50:12,692 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35081:35081),(127.0.0.1/127.0.0.1:46595:46595)] 2024-11-22T18:50:12,692 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/WALs/d79ba0c344fb,34657,1732301380157/d79ba0c344fb%2C34657%2C1732301380157.1732301392409 is not closed yet, will try archiving it next time 2024-11-22T18:50:12,694 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46673 is added to blk_1073741837_1013 (size=12399) 2024-11-22T18:50:12,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46333 is added to blk_1073741837_1013 (size=12399) 2024-11-22T18:50:12,896 INFO [FSHLog-0-hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c-prefix:d79ba0c344fb,34657,1732301380157 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46333,DS-dcd31298-4d5f-4dc2-b56e-037a47ef4cdd,DISK], DatanodeInfoWithStorage[127.0.0.1:46673,DS-9469d354-99cd-4bc5-8079-98b94f6baea7,DISK]] 2024-11-22T18:50:15,100 INFO [FSHLog-0-hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c-prefix:d79ba0c344fb,34657,1732301380157 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46333,DS-dcd31298-4d5f-4dc2-b56e-037a47ef4cdd,DISK], DatanodeInfoWithStorage[127.0.0.1:46673,DS-9469d354-99cd-4bc5-8079-98b94f6baea7,DISK]] 2024-11-22T18:50:17,304 INFO [FSHLog-0-hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c-prefix:d79ba0c344fb,34657,1732301380157 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46333,DS-dcd31298-4d5f-4dc2-b56e-037a47ef4cdd,DISK], DatanodeInfoWithStorage[127.0.0.1:46673,DS-9469d354-99cd-4bc5-8079-98b94f6baea7,DISK]] 2024-11-22T18:50:19,508 INFO [FSHLog-0-hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c-prefix:d79ba0c344fb,34657,1732301380157 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46333,DS-dcd31298-4d5f-4dc2-b56e-037a47ef4cdd,DISK], DatanodeInfoWithStorage[127.0.0.1:46673,DS-9469d354-99cd-4bc5-8079-98b94f6baea7,DISK]] 2024-11-22T18:50:19,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34657 {}] regionserver.HRegion(8855): Flush requested on fa520453eee3fe92eb0889da491d4200 2024-11-22T18:50:19,508 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing fa520453eee3fe92eb0889da491d4200 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T18:50:19,710 INFO [FSHLog-0-hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c-prefix:d79ba0c344fb,34657,1732301380157 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46333,DS-dcd31298-4d5f-4dc2-b56e-037a47ef4cdd,DISK], DatanodeInfoWithStorage[127.0.0.1:46673,DS-9469d354-99cd-4bc5-8079-98b94f6baea7,DISK]] 2024-11-22T18:50:19,716 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/default/TestLogRolling-testSlowSyncLogRolling/fa520453eee3fe92eb0889da491d4200/.tmp/info/9b5d75d6de214bfcad2f5a17249239c7 is 1080, key is row0008/info:/1732301406470/Put/seqid=0 2024-11-22T18:50:19,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46673 is added to blk_1073741840_1016 (size=12509) 2024-11-22T18:50:19,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46333 is added to blk_1073741840_1016 (size=12509) 2024-11-22T18:50:19,725 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at 
sequenceid=21 (bloomFilter=true), to=hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/default/TestLogRolling-testSlowSyncLogRolling/fa520453eee3fe92eb0889da491d4200/.tmp/info/9b5d75d6de214bfcad2f5a17249239c7 2024-11-22T18:50:19,735 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/default/TestLogRolling-testSlowSyncLogRolling/fa520453eee3fe92eb0889da491d4200/.tmp/info/9b5d75d6de214bfcad2f5a17249239c7 as hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/default/TestLogRolling-testSlowSyncLogRolling/fa520453eee3fe92eb0889da491d4200/info/9b5d75d6de214bfcad2f5a17249239c7 2024-11-22T18:50:19,744 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/default/TestLogRolling-testSlowSyncLogRolling/fa520453eee3fe92eb0889da491d4200/info/9b5d75d6de214bfcad2f5a17249239c7, entries=7, sequenceid=21, filesize=12.2 K 2024-11-22T18:50:19,946 INFO [FSHLog-0-hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c-prefix:d79ba0c344fb,34657,1732301380157 {}] wal.AbstractFSWAL(1368): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46333,DS-dcd31298-4d5f-4dc2-b56e-037a47ef4cdd,DISK], DatanodeInfoWithStorage[127.0.0.1:46673,DS-9469d354-99cd-4bc5-8079-98b94f6baea7,DISK]] 2024-11-22T18:50:19,946 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for fa520453eee3fe92eb0889da491d4200 in 438ms, sequenceid=21, compaction requested=false 2024-11-22T18:50:19,946 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for fa520453eee3fe92eb0889da491d4200: 2024-11-22T18:50:19,946 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-22T18:50:19,946 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T18:50:19,947 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/default/TestLogRolling-testSlowSyncLogRolling/fa520453eee3fe92eb0889da491d4200/info/f2469bd225bc4de5915cc2d3269f0770 because midkey is the same as first or last row 2024-11-22T18:50:21,712 INFO [FSHLog-0-hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c-prefix:d79ba0c344fb,34657,1732301380157 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46333,DS-dcd31298-4d5f-4dc2-b56e-037a47ef4cdd,DISK], DatanodeInfoWithStorage[127.0.0.1:46673,DS-9469d354-99cd-4bc5-8079-98b94f6baea7,DISK]] 2024-11-22T18:50:22,096 INFO [master/d79ba0c344fb:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-22T18:50:22,096 INFO [master/d79ba0c344fb:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
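The flush above ends with two split-policy checks: the store total (24.4 K) exceeds the 16.0 K size to check, but the split is refused because the candidate midkey equals the first or last row. A minimal sketch of that two-part decision; the method names and the keys in main are illustrative, and only the two sizes come from the log:

import java.util.Arrays;

public final class SplitCheckSketch {
  /** Size trigger: split once the summed store size passes the configured check size. */
  static boolean bigEnoughToSplit(long sumSizeBytes, long sizeToCheckBytes) {
    return sumSizeBytes > sizeToCheckBytes;
  }

  /** Midkey guard: a split point equal to the first or last row would leave one daughter empty. */
  static boolean usableSplitPoint(byte[] midKey, byte[] firstRow, byte[] lastRow) {
    return midKey != null && !Arrays.equals(midKey, firstRow) && !Arrays.equals(midKey, lastRow);
  }

  public static void main(String[] args) {
    long sumSize = 25_000;              // roughly the 24.4 K reported above
    long sizeToCheck = 16_384;          // the 16.0 K threshold from the log
    byte[] first = "row0001".getBytes();
    byte[] last = "row0007".getBytes(); // illustrative keys
    byte[] midKey = first;              // the degenerate case the log rejects
    boolean split = bigEnoughToSplit(sumSize, sizeToCheck) && usableSplitPoint(midKey, first, last);
    System.out.println("split? " + split);  // false: the size check passes, the midkey guard does not
  }
}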
2024-11-22T18:50:23,916 INFO [FSHLog-0-hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c-prefix:d79ba0c344fb,34657,1732301380157 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46333,DS-dcd31298-4d5f-4dc2-b56e-037a47ef4cdd,DISK], DatanodeInfoWithStorage[127.0.0.1:46673,DS-9469d354-99cd-4bc5-8079-98b94f6baea7,DISK]] 2024-11-22T18:50:23,918 WARN [FSHLog-0-hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c-prefix:d79ba0c344fb,34657,1732301380157 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46333,DS-dcd31298-4d5f-4dc2-b56e-037a47ef4cdd,DISK], DatanodeInfoWithStorage[127.0.0.1:46673,DS-9469d354-99cd-4bc5-8079-98b94f6baea7,DISK]] 2024-11-22T18:50:23,919 DEBUG [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog d79ba0c344fb%2C34657%2C1732301380157:(num 1732301412481) roll requested 2024-11-22T18:50:23,920 INFO [regionserver/d79ba0c344fb:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor d79ba0c344fb%2C34657%2C1732301380157.1732301423920 2024-11-22T18:50:24,127 INFO [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 205 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46333,DS-dcd31298-4d5f-4dc2-b56e-037a47ef4cdd,DISK], DatanodeInfoWithStorage[127.0.0.1:46673,DS-9469d354-99cd-4bc5-8079-98b94f6baea7,DISK]] 2024-11-22T18:50:24,128 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:50:24,128 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:50:24,128 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:50:24,128 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:50:24,128 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:50:24,128 INFO [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/WALs/d79ba0c344fb,34657,1732301380157/d79ba0c344fb%2C34657%2C1732301380157.1732301412481 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/WALs/d79ba0c344fb,34657,1732301380157/d79ba0c344fb%2C34657%2C1732301380157.1732301423920 2024-11-22T18:50:24,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46673 is added to blk_1073741839_1015 (size=7739) 2024-11-22T18:50:24,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46333 is added to blk_1073741839_1015 (size=7739) 2024-11-22T18:50:24,133 DEBUG [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35081:35081),(127.0.0.1/127.0.0.1:46595:46595)] 2024-11-22T18:50:24,133 DEBUG [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/WALs/d79ba0c344fb,34657,1732301380157/d79ba0c344fb%2C34657%2C1732301380157.1732301412481 is not closed yet, will try archiving it next time 2024-11-22T18:50:24,133 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/WALs/d79ba0c344fb,34657,1732301380157/d79ba0c344fb%2C34657%2C1732301380157.1732301392409 to 
hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/oldWALs/d79ba0c344fb%2C34657%2C1732301380157.1732301392409 2024-11-22T18:50:26,120 INFO [FSHLog-0-hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c-prefix:d79ba0c344fb,34657,1732301380157 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46333,DS-dcd31298-4d5f-4dc2-b56e-037a47ef4cdd,DISK], DatanodeInfoWithStorage[127.0.0.1:46673,DS-9469d354-99cd-4bc5-8079-98b94f6baea7,DISK]] 2024-11-22T18:50:27,703 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region fa520453eee3fe92eb0889da491d4200, had cached 0 bytes from a total of 25018 2024-11-22T18:50:28,324 INFO [FSHLog-0-hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c-prefix:d79ba0c344fb,34657,1732301380157 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46333,DS-dcd31298-4d5f-4dc2-b56e-037a47ef4cdd,DISK], DatanodeInfoWithStorage[127.0.0.1:46673,DS-9469d354-99cd-4bc5-8079-98b94f6baea7,DISK]] 2024-11-22T18:50:30,527 INFO [FSHLog-0-hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c-prefix:d79ba0c344fb,34657,1732301380157 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46333,DS-dcd31298-4d5f-4dc2-b56e-037a47ef4cdd,DISK], DatanodeInfoWithStorage[127.0.0.1:46673,DS-9469d354-99cd-4bc5-8079-98b94f6baea7,DISK]] 2024-11-22T18:50:32,732 INFO [FSHLog-0-hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c-prefix:d79ba0c344fb,34657,1732301380157 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46333,DS-dcd31298-4d5f-4dc2-b56e-037a47ef4cdd,DISK], DatanodeInfoWithStorage[127.0.0.1:46673,DS-9469d354-99cd-4bc5-8079-98b94f6baea7,DISK]] 2024-11-22T18:50:34,734 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T18:50:34,735 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor d79ba0c344fb%2C34657%2C1732301380157.1732301434735 2024-11-22T18:50:38,490 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
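After a roll, once the previous writer is fully closed (the log notes "not closed yet, will try archiving it next time" when it is not), the old WAL file is moved into the shared oldWALs directory, as in the "Archiving ... to ... oldWALs/..." records above. A small sketch of that move using the Hadoop FileSystem API; the class name, helper and paths are illustrative, not the AbstractFSWAL archiving code:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class WalArchiverSketch {
  /**
   * Moves a rolled, no-longer-needed WAL from the server's WALs directory into oldWALs.
   * On HDFS a rename is a namenode metadata operation, so archiving stays cheap
   * regardless of the WAL's size.
   */
  public static Path archive(FileSystem fs, Path rolledWal, Path oldWalsDir) throws IOException {
    if (!fs.exists(oldWalsDir) && !fs.mkdirs(oldWalsDir)) {
      throw new IOException("could not create " + oldWalsDir);
    }
    Path target = new Path(oldWalsDir, rolledWal.getName());
    if (!fs.rename(rolledWal, target)) {
      throw new IOException("failed to archive " + rolledWal + " to " + target);
    }
    return target;
  }

  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());               // uses fs.defaultFS
    Path wal = new Path("/example/WALs/server1/server1.0000000001");   // hypothetical paths
    System.out.println("archived to " + archive(fs, wal, new Path("/example/oldWALs")));
  }
}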
2024-11-22T18:50:39,744 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46333,DS-dcd31298-4d5f-4dc2-b56e-037a47ef4cdd,DISK], DatanodeInfoWithStorage[127.0.0.1:46673,DS-9469d354-99cd-4bc5-8079-98b94f6baea7,DISK]] 2024-11-22T18:50:39,746 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46333,DS-dcd31298-4d5f-4dc2-b56e-037a47ef4cdd,DISK], DatanodeInfoWithStorage[127.0.0.1:46673,DS-9469d354-99cd-4bc5-8079-98b94f6baea7,DISK]] 2024-11-22T18:50:39,746 DEBUG [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog d79ba0c344fb%2C34657%2C1732301380157:(num 1732301434735) roll requested 2024-11-22T18:50:39,746 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:50:39,746 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:50:39,747 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:50:39,747 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:50:39,750 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:50:39,750 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/WALs/d79ba0c344fb,34657,1732301380157/d79ba0c344fb%2C34657%2C1732301380157.1732301423920 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/WALs/d79ba0c344fb,34657,1732301380157/d79ba0c344fb%2C34657%2C1732301380157.1732301434735 2024-11-22T18:50:39,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46673 is added to blk_1073741841_1017 (size=4753) 2024-11-22T18:50:39,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46333 is added to blk_1073741841_1017 (size=4753) 2024-11-22T18:50:39,757 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35081:35081),(127.0.0.1/127.0.0.1:46595:46595)] 2024-11-22T18:50:39,757 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/WALs/d79ba0c344fb,34657,1732301380157/d79ba0c344fb%2C34657%2C1732301380157.1732301423920 is not closed yet, will try archiving it next time 2024-11-22T18:50:39,757 INFO [regionserver/d79ba0c344fb:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor d79ba0c344fb%2C34657%2C1732301380157.1732301439757 2024-11-22T18:50:44,760 INFO [FSHLog-0-hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c-prefix:d79ba0c344fb,34657,1732301380157 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46333,DS-dcd31298-4d5f-4dc2-b56e-037a47ef4cdd,DISK], DatanodeInfoWithStorage[127.0.0.1:46673,DS-9469d354-99cd-4bc5-8079-98b94f6baea7,DISK]] 2024-11-22T18:50:44,761 WARN [FSHLog-0-hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c-prefix:d79ba0c344fb,34657,1732301380157 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46333,DS-dcd31298-4d5f-4dc2-b56e-037a47ef4cdd,DISK], 
DatanodeInfoWithStorage[127.0.0.1:46673,DS-9469d354-99cd-4bc5-8079-98b94f6baea7,DISK]] 2024-11-22T18:50:44,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34657 {}] regionserver.HRegion(8855): Flush requested on fa520453eee3fe92eb0889da491d4200 2024-11-22T18:50:44,761 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing fa520453eee3fe92eb0889da491d4200 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T18:50:44,765 INFO [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46333,DS-dcd31298-4d5f-4dc2-b56e-037a47ef4cdd,DISK], DatanodeInfoWithStorage[127.0.0.1:46673,DS-9469d354-99cd-4bc5-8079-98b94f6baea7,DISK]] 2024-11-22T18:50:44,765 WARN [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46333,DS-dcd31298-4d5f-4dc2-b56e-037a47ef4cdd,DISK], DatanodeInfoWithStorage[127.0.0.1:46673,DS-9469d354-99cd-4bc5-8079-98b94f6baea7,DISK]] 2024-11-22T18:50:46,762 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T18:50:49,763 INFO [FSHLog-0-hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c-prefix:d79ba0c344fb,34657,1732301380157 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46333,DS-dcd31298-4d5f-4dc2-b56e-037a47ef4cdd,DISK], DatanodeInfoWithStorage[127.0.0.1:46673,DS-9469d354-99cd-4bc5-8079-98b94f6baea7,DISK]] 2024-11-22T18:50:49,763 WARN [FSHLog-0-hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c-prefix:d79ba0c344fb,34657,1732301380157 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46333,DS-dcd31298-4d5f-4dc2-b56e-037a47ef4cdd,DISK], DatanodeInfoWithStorage[127.0.0.1:46673,DS-9469d354-99cd-4bc5-8079-98b94f6baea7,DISK]] 2024-11-22T18:50:49,764 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:50:49,764 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:50:49,764 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:50:49,764 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:50:49,764 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:50:49,765 INFO [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/WALs/d79ba0c344fb,34657,1732301380157/d79ba0c344fb%2C34657%2C1732301380157.1732301434735 with entries=2, filesize=1.52 KB; new WAL /user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/WALs/d79ba0c344fb,34657,1732301380157/d79ba0c344fb%2C34657%2C1732301380157.1732301439757 2024-11-22T18:50:49,767 DEBUG [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46595:46595),(127.0.0.1/127.0.0.1:35081:35081)] 2024-11-22T18:50:49,767 DEBUG [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/WALs/d79ba0c344fb,34657,1732301380157/d79ba0c344fb%2C34657%2C1732301380157.1732301434735 is not closed yet, will try archiving it next time 2024-11-22T18:50:49,767 
DEBUG [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog d79ba0c344fb%2C34657%2C1732301380157:(num 1732301439757) roll requested 2024-11-22T18:50:49,768 INFO [regionserver/d79ba0c344fb:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor d79ba0c344fb%2C34657%2C1732301380157.1732301449767 2024-11-22T18:50:49,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46333 is added to blk_1073741842_1018 (size=1569) 2024-11-22T18:50:49,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46673 is added to blk_1073741842_1018 (size=1569) 2024-11-22T18:50:49,770 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/default/TestLogRolling-testSlowSyncLogRolling/fa520453eee3fe92eb0889da491d4200/.tmp/info/19632b2ef1d54a4791596cc3ba54d262 is 1080, key is row0015/info:/1732301421510/Put/seqid=0 2024-11-22T18:50:49,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46333 is added to blk_1073741844_1020 (size=12509) 2024-11-22T18:50:49,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46673 is added to blk_1073741844_1020 (size=12509) 2024-11-22T18:50:49,789 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/default/TestLogRolling-testSlowSyncLogRolling/fa520453eee3fe92eb0889da491d4200/.tmp/info/19632b2ef1d54a4791596cc3ba54d262 2024-11-22T18:50:49,802 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/default/TestLogRolling-testSlowSyncLogRolling/fa520453eee3fe92eb0889da491d4200/.tmp/info/19632b2ef1d54a4791596cc3ba54d262 as hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/default/TestLogRolling-testSlowSyncLogRolling/fa520453eee3fe92eb0889da491d4200/info/19632b2ef1d54a4791596cc3ba54d262 2024-11-22T18:50:49,812 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/default/TestLogRolling-testSlowSyncLogRolling/fa520453eee3fe92eb0889da491d4200/info/19632b2ef1d54a4791596cc3ba54d262, entries=7, sequenceid=31, filesize=12.2 K 2024-11-22T18:50:54,782 INFO [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46673,DS-9469d354-99cd-4bc5-8079-98b94f6baea7,DISK], DatanodeInfoWithStorage[127.0.0.1:46333,DS-dcd31298-4d5f-4dc2-b56e-037a47ef4cdd,DISK]] 2024-11-22T18:50:54,783 WARN [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46673,DS-9469d354-99cd-4bc5-8079-98b94f6baea7,DISK], DatanodeInfoWithStorage[127.0.0.1:46333,DS-dcd31298-4d5f-4dc2-b56e-037a47ef4cdd,DISK]] 2024-11-22T18:50:54,814 INFO [FSHLog-0-hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c-prefix:d79ba0c344fb,34657,1732301380157 {}] wal.AbstractFSWAL(1368): 
Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46673,DS-9469d354-99cd-4bc5-8079-98b94f6baea7,DISK], DatanodeInfoWithStorage[127.0.0.1:46333,DS-dcd31298-4d5f-4dc2-b56e-037a47ef4cdd,DISK]] 2024-11-22T18:50:54,814 WARN [FSHLog-0-hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c-prefix:d79ba0c344fb,34657,1732301380157 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46673,DS-9469d354-99cd-4bc5-8079-98b94f6baea7,DISK], DatanodeInfoWithStorage[127.0.0.1:46333,DS-dcd31298-4d5f-4dc2-b56e-037a47ef4cdd,DISK]] 2024-11-22T18:50:54,814 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for fa520453eee3fe92eb0889da491d4200 in 10053ms, sequenceid=31, compaction requested=true 2024-11-22T18:50:54,815 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:50:54,815 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for fa520453eee3fe92eb0889da491d4200: 2024-11-22T18:50:54,815 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:50:54,815 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-22T18:50:54,815 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:50:54,815 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T18:50:54,815 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:50:54,815 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/default/TestLogRolling-testSlowSyncLogRolling/fa520453eee3fe92eb0889da491d4200/info/f2469bd225bc4de5915cc2d3269f0770 because midkey is the same as first or last row 2024-11-22T18:50:54,815 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:50:54,815 INFO [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/WALs/d79ba0c344fb,34657,1732301380157/d79ba0c344fb%2C34657%2C1732301380157.1732301439757 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/WALs/d79ba0c344fb,34657,1732301380157/d79ba0c344fb%2C34657%2C1732301380157.1732301449767 2024-11-22T18:50:54,817 DEBUG [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35081:35081),(127.0.0.1/127.0.0.1:46595:46595)] 2024-11-22T18:50:54,817 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fa520453eee3fe92eb0889da491d4200:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T18:50:54,817 DEBUG [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/WALs/d79ba0c344fb,34657,1732301380157/d79ba0c344fb%2C34657%2C1732301380157.1732301439757 is not closed yet, will try archiving it next time 2024-11-22T18:50:54,817 DEBUG [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog d79ba0c344fb%2C34657%2C1732301380157:(num 1732301449767) roll requested 
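Two distinct slow-sync triggers show up in the records above: a single sync whose cost crosses a time threshold ("time=5006 ms, threshold=5000 ms") requests a roll on its own, and an accumulation of cheaper slow syncs trips a count threshold ("count=8, threshold=5"). A compact sketch of that bookkeeping; the field names are illustrative, the two thresholds are taken from the log, and the real tracker also ages slow syncs out over a time window, which is omitted here:

public final class SlowSyncRollTracker {
  private final long slowSyncMs;        // a sync slower than this counts as "slow" (100 ms in this sketch)
  private final long rollOnSyncMs;      // one sync slower than this forces a roll (5000 ms in the log)
  private final int slowSyncCountLimit; // this many slow syncs also force a roll (5 in the log)
  private int slowSyncCount;

  public SlowSyncRollTracker(long slowSyncMs, long rollOnSyncMs, int slowSyncCountLimit) {
    this.slowSyncMs = slowSyncMs;
    this.rollOnSyncMs = rollOnSyncMs;
    this.slowSyncCountLimit = slowSyncCountLimit;
  }

  /** Returns true if the WAL should be rolled after observing a sync of the given duration. */
  public boolean recordSync(long syncCostMs) {
    if (syncCostMs >= rollOnSyncMs) {
      slowSyncCount = 0;
      return true;                                  // the "time=5006 ms, threshold=5000 ms" case
    }
    if (syncCostMs >= slowSyncMs && ++slowSyncCount > slowSyncCountLimit) {
      slowSyncCount = 0;
      return true;                                  // the "count exceeded threshold" case
    }
    return false;
  }

  public static void main(String[] args) {
    SlowSyncRollTracker tracker = new SlowSyncRollTracker(100, 5000, 5);
    for (int i = 0; i < 8; i++) {
      System.out.println("sync 201 ms -> roll? " + tracker.recordSync(201));
    }
    System.out.println("sync 5006 ms -> roll? " + tracker.recordSync(5006));
  }
}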
2024-11-22T18:50:54,817 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/WALs/d79ba0c344fb,34657,1732301380157/d79ba0c344fb%2C34657%2C1732301380157.1732301412481 to hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/oldWALs/d79ba0c344fb%2C34657%2C1732301380157.1732301412481 2024-11-22T18:50:54,818 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor d79ba0c344fb%2C34657%2C1732301380157.1732301454818 2024-11-22T18:50:54,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46333 is added to blk_1073741843_1019 (size=438) 2024-11-22T18:50:54,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46673 is added to blk_1073741843_1019 (size=438) 2024-11-22T18:50:54,820 DEBUG [RS:0;d79ba0c344fb:34657-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T18:50:54,820 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T18:50:54,823 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/WALs/d79ba0c344fb,34657,1732301380157/d79ba0c344fb%2C34657%2C1732301380157.1732301423920 to hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/oldWALs/d79ba0c344fb%2C34657%2C1732301380157.1732301423920 2024-11-22T18:50:54,825 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/WALs/d79ba0c344fb,34657,1732301380157/d79ba0c344fb%2C34657%2C1732301380157.1732301434735 to hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/oldWALs/d79ba0c344fb%2C34657%2C1732301380157.1732301434735 2024-11-22T18:50:54,827 DEBUG [RS:0;d79ba0c344fb:34657-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T18:50:54,827 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/WALs/d79ba0c344fb,34657,1732301380157/d79ba0c344fb%2C34657%2C1732301380157.1732301439757 to hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/oldWALs/d79ba0c344fb%2C34657%2C1732301380157.1732301439757 2024-11-22T18:50:54,829 DEBUG [RS:0;d79ba0c344fb:34657-shortCompactions-0 {}] regionserver.HStore(1541): fa520453eee3fe92eb0889da491d4200/info is initiating minor compaction (all files) 2024-11-22T18:50:54,829 INFO [RS:0;d79ba0c344fb:34657-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of fa520453eee3fe92eb0889da491d4200/info in TestLogRolling-testSlowSyncLogRolling,,1732301382271.fa520453eee3fe92eb0889da491d4200. 
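At this point the compactor has picked all three flushed files (3 files of size 37527, about 36.6 K) for a minor compaction, reporting "1 permutations with 1 in ratio". The heart of that selection is a size-ratio test over a window of adjacent store files; the sketch below shows only that test, as a simplification of the exploring policy rather than the policy itself, with a ratio of 1.2 chosen for illustration:

import java.util.List;

public final class RatioSelectionSketch {
  /**
   * A window of adjacent store files is "in ratio" when no single file is more than
   * ratio times the combined size of the other files in the window, so large,
   * already-compacted files tend to fall out of minor compactions.
   */
  static boolean inRatio(List<Long> sizes, double ratio) {
    long total = sizes.stream().mapToLong(Long::longValue).sum();
    for (long size : sizes) {
      if (size > ratio * (total - size)) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Three ~12.2 K flush outputs, as in the log: each one is far below
    // ratio * (sum of the other two), so the whole window qualifies.
    List<Long> flushed = List.of(12509L, 12509L, 12509L);
    System.out.println("in ratio? " + inRatio(flushed, 1.2));        // true

    // A much larger, already-compacted file dominates the window and breaks the ratio.
    List<Long> withBigFile = List.of(102_400L, 12509L, 12509L);
    System.out.println("in ratio? " + inRatio(withBigFile, 1.2));    // false
  }
}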
2024-11-22T18:50:54,830 INFO [RS:0;d79ba0c344fb:34657-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/default/TestLogRolling-testSlowSyncLogRolling/fa520453eee3fe92eb0889da491d4200/info/f2469bd225bc4de5915cc2d3269f0770, hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/default/TestLogRolling-testSlowSyncLogRolling/fa520453eee3fe92eb0889da491d4200/info/9b5d75d6de214bfcad2f5a17249239c7, hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/default/TestLogRolling-testSlowSyncLogRolling/fa520453eee3fe92eb0889da491d4200/info/19632b2ef1d54a4791596cc3ba54d262] into tmpdir=hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/default/TestLogRolling-testSlowSyncLogRolling/fa520453eee3fe92eb0889da491d4200/.tmp, totalSize=36.6 K 2024-11-22T18:50:54,831 DEBUG [RS:0;d79ba0c344fb:34657-shortCompactions-0 {}] compactions.Compactor(225): Compacting f2469bd225bc4de5915cc2d3269f0770, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732301392439 2024-11-22T18:50:54,832 DEBUG [RS:0;d79ba0c344fb:34657-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9b5d75d6de214bfcad2f5a17249239c7, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1732301406470 2024-11-22T18:50:54,833 DEBUG [RS:0;d79ba0c344fb:34657-shortCompactions-0 {}] compactions.Compactor(225): Compacting 19632b2ef1d54a4791596cc3ba54d262, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1732301421510 2024-11-22T18:50:54,834 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:50:54,834 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:50:54,834 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:50:54,836 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:50:54,836 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:50:54,837 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/WALs/d79ba0c344fb,34657,1732301380157/d79ba0c344fb%2C34657%2C1732301380157.1732301449767 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/WALs/d79ba0c344fb,34657,1732301380157/d79ba0c344fb%2C34657%2C1732301380157.1732301454818 2024-11-22T18:50:54,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46673 is added to blk_1073741845_1021 (size=93) 2024-11-22T18:50:54,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46333 is added to blk_1073741845_1021 (size=93) 2024-11-22T18:50:54,842 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/WALs/d79ba0c344fb,34657,1732301380157/d79ba0c344fb%2C34657%2C1732301380157.1732301449767 to hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/oldWALs/d79ba0c344fb%2C34657%2C1732301380157.1732301449767 2024-11-22T18:50:54,846 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46595:46595),(127.0.0.1/127.0.0.1:35081:35081)] 2024-11-22T18:50:54,846 INFO 
[regionserver/d79ba0c344fb:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor d79ba0c344fb%2C34657%2C1732301380157.1732301454846 2024-11-22T18:50:54,865 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:50:54,865 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:50:54,865 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:50:54,865 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:50:54,865 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:50:54,866 INFO [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/WALs/d79ba0c344fb,34657,1732301380157/d79ba0c344fb%2C34657%2C1732301380157.1732301454818 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/WALs/d79ba0c344fb,34657,1732301380157/d79ba0c344fb%2C34657%2C1732301380157.1732301454846 2024-11-22T18:50:54,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46333 is added to blk_1073741846_1022 (size=1258) 2024-11-22T18:50:54,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46673 is added to blk_1073741846_1022 (size=1258) 2024-11-22T18:50:54,872 DEBUG [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35081:35081),(127.0.0.1/127.0.0.1:46595:46595)] 2024-11-22T18:50:54,872 DEBUG [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/WALs/d79ba0c344fb,34657,1732301380157/d79ba0c344fb%2C34657%2C1732301380157.1732301454818 is not closed yet, will try archiving it next time 2024-11-22T18:50:54,881 INFO [RS:0;d79ba0c344fb:34657-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fa520453eee3fe92eb0889da491d4200#info#compaction#3 average throughput is 21.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T18:50:54,882 DEBUG [RS:0;d79ba0c344fb:34657-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/default/TestLogRolling-testSlowSyncLogRolling/fa520453eee3fe92eb0889da491d4200/.tmp/info/5e595039580f4969b8e348d3f0bab5cf is 1080, key is row0001/info:/1732301392439/Put/seqid=0 2024-11-22T18:50:54,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46673 is added to blk_1073741848_1024 (size=27710) 2024-11-22T18:50:54,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46333 is added to blk_1073741848_1024 (size=27710) 2024-11-22T18:50:54,903 DEBUG [RS:0;d79ba0c344fb:34657-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/default/TestLogRolling-testSlowSyncLogRolling/fa520453eee3fe92eb0889da491d4200/.tmp/info/5e595039580f4969b8e348d3f0bab5cf as hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/default/TestLogRolling-testSlowSyncLogRolling/fa520453eee3fe92eb0889da491d4200/info/5e595039580f4969b8e348d3f0bab5cf 2024-11-22T18:50:54,920 INFO [RS:0;d79ba0c344fb:34657-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in fa520453eee3fe92eb0889da491d4200/info of fa520453eee3fe92eb0889da491d4200 into 5e595039580f4969b8e348d3f0bab5cf(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T18:50:54,920 DEBUG [RS:0;d79ba0c344fb:34657-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for fa520453eee3fe92eb0889da491d4200: 2024-11-22T18:50:54,922 INFO [RS:0;d79ba0c344fb:34657-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1732301382271.fa520453eee3fe92eb0889da491d4200., storeName=fa520453eee3fe92eb0889da491d4200/info, priority=13, startTime=1732301454817; duration=0sec 2024-11-22T18:50:54,922 DEBUG [RS:0;d79ba0c344fb:34657-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-22T18:50:54,922 DEBUG [RS:0;d79ba0c344fb:34657-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T18:50:54,922 DEBUG [RS:0;d79ba0c344fb:34657-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/default/TestLogRolling-testSlowSyncLogRolling/fa520453eee3fe92eb0889da491d4200/info/5e595039580f4969b8e348d3f0bab5cf because midkey is the same as first or last row 2024-11-22T18:50:54,923 DEBUG [RS:0;d79ba0c344fb:34657-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-22T18:50:54,923 DEBUG [RS:0;d79ba0c344fb:34657-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T18:50:54,923 DEBUG [RS:0;d79ba0c344fb:34657-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/default/TestLogRolling-testSlowSyncLogRolling/fa520453eee3fe92eb0889da491d4200/info/5e595039580f4969b8e348d3f0bab5cf because midkey is the same as first or last row 2024-11-22T18:50:54,923 DEBUG [RS:0;d79ba0c344fb:34657-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-22T18:50:54,923 DEBUG [RS:0;d79ba0c344fb:34657-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T18:50:54,923 DEBUG [RS:0;d79ba0c344fb:34657-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/default/TestLogRolling-testSlowSyncLogRolling/fa520453eee3fe92eb0889da491d4200/info/5e595039580f4969b8e348d3f0bab5cf because midkey is the same as first or last row 2024-11-22T18:50:54,923 DEBUG [RS:0;d79ba0c344fb:34657-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T18:50:54,923 DEBUG [RS:0;d79ba0c344fb:34657-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fa520453eee3fe92eb0889da491d4200:info 2024-11-22T18:51:06,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34657 {}] regionserver.HRegion(8855): Flush requested on fa520453eee3fe92eb0889da491d4200 2024-11-22T18:51:06,873 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing fa520453eee3fe92eb0889da491d4200 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T18:51:06,879 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/default/TestLogRolling-testSlowSyncLogRolling/fa520453eee3fe92eb0889da491d4200/.tmp/info/6f6e5041556643ec9ce1260db5e33f07 is 1080, key is row0022/info:/1732301454848/Put/seqid=0 2024-11-22T18:51:06,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46673 is added to blk_1073741849_1025 (size=12509) 2024-11-22T18:51:06,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46333 is added to blk_1073741849_1025 (size=12509) 2024-11-22T18:51:06,887 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/default/TestLogRolling-testSlowSyncLogRolling/fa520453eee3fe92eb0889da491d4200/.tmp/info/6f6e5041556643ec9ce1260db5e33f07 2024-11-22T18:51:06,896 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/default/TestLogRolling-testSlowSyncLogRolling/fa520453eee3fe92eb0889da491d4200/.tmp/info/6f6e5041556643ec9ce1260db5e33f07 as hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/default/TestLogRolling-testSlowSyncLogRolling/fa520453eee3fe92eb0889da491d4200/info/6f6e5041556643ec9ce1260db5e33f07 2024-11-22T18:51:06,904 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/default/TestLogRolling-testSlowSyncLogRolling/fa520453eee3fe92eb0889da491d4200/info/6f6e5041556643ec9ce1260db5e33f07, entries=7, sequenceid=42, filesize=12.2 K 2024-11-22T18:51:06,905 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for fa520453eee3fe92eb0889da491d4200 in 33ms, sequenceid=42, compaction requested=false 2024-11-22T18:51:06,905 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for fa520453eee3fe92eb0889da491d4200: 2024-11-22T18:51:06,906 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-22T18:51:06,906 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T18:51:06,906 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/default/TestLogRolling-testSlowSyncLogRolling/fa520453eee3fe92eb0889da491d4200/info/5e595039580f4969b8e348d3f0bab5cf because midkey is the same as first or last row 2024-11-22T18:51:08,490 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-22T18:51:12,703 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region fa520453eee3fe92eb0889da491d4200, had cached 0 bytes from a total of 40219 2024-11-22T18:51:14,884 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-22T18:51:14,884 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-22T18:51:14,885 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T18:51:14,889 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T18:51:14,890 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T18:51:14,890 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
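The call stack above shows how the shutdown is reached: AbstractTestLogRolling.tearDown calls HBaseTestingUtil.shutdownMiniCluster, which closes the shared connection and stops the cluster. A minimal sketch of a test wired the same way, assuming the usual no-arg HBaseTestingUtil constructor and the startMiniCluster/shutdownMiniCluster pair; the class name and lifecycle placement are illustrative:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;

public class ExampleLogRollingTest {
  // Assumed API shape for the sketch; the real test configures and owns its
  // testing utility in the abstract base class rather than per test instance.
  private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    testUtil.startMiniCluster();          // brings up a small local HBase cluster for the test
  }

  @After
  public void tearDown() throws Exception {
    // Mirrors the path recorded in the call stack above:
    // tearDown -> shutdownMiniCluster -> shutdownMiniHBaseCluster -> cleanup/closeConnection.
    testUtil.shutdownMiniCluster();
  }
}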
2024-11-22T18:51:14,890 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-22T18:51:14,890 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1177783656, stopped=false 2024-11-22T18:51:14,890 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=d79ba0c344fb,44645,1732301379428 2024-11-22T18:51:14,892 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34657-0x10141033e850001, quorum=127.0.0.1:49841, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T18:51:14,892 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44645-0x10141033e850000, quorum=127.0.0.1:49841, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T18:51:14,892 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34657-0x10141033e850001, quorum=127.0.0.1:49841, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:51:14,892 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44645-0x10141033e850000, quorum=127.0.0.1:49841, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:51:14,892 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T18:51:14,893 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-22T18:51:14,893 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T18:51:14,893 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T18:51:14,893 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34657-0x10141033e850001, quorum=127.0.0.1:49841, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T18:51:14,893 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44645-0x10141033e850000, quorum=127.0.0.1:49841, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T18:51:14,893 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'd79ba0c344fb,34657,1732301380157' ***** 2024-11-22T18:51:14,893 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-22T18:51:14,894 INFO [RS:0;d79ba0c344fb:34657 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-22T18:51:14,894 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-22T18:51:14,894 INFO [RS:0;d79ba0c344fb:34657 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-22T18:51:14,894 INFO [RS:0;d79ba0c344fb:34657 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-22T18:51:14,894 INFO [RS:0;d79ba0c344fb:34657 {}] regionserver.HRegionServer(3091): Received CLOSE for fa520453eee3fe92eb0889da491d4200 2024-11-22T18:51:14,895 INFO [RS:0;d79ba0c344fb:34657 {}] regionserver.HRegionServer(959): stopping server d79ba0c344fb,34657,1732301380157 2024-11-22T18:51:14,895 INFO [RS:0;d79ba0c344fb:34657 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T18:51:14,895 INFO [RS:0;d79ba0c344fb:34657 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;d79ba0c344fb:34657. 
2024-11-22T18:51:14,895 DEBUG [RS:0;d79ba0c344fb:34657 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T18:51:14,895 DEBUG [RS:0;d79ba0c344fb:34657 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T18:51:14,895 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing fa520453eee3fe92eb0889da491d4200, disabling compactions & flushes 2024-11-22T18:51:14,895 INFO [RS:0;d79ba0c344fb:34657 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-22T18:51:14,895 INFO [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1732301382271.fa520453eee3fe92eb0889da491d4200. 2024-11-22T18:51:14,895 INFO [RS:0;d79ba0c344fb:34657 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-22T18:51:14,896 INFO [RS:0;d79ba0c344fb:34657 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-22T18:51:14,896 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1732301382271.fa520453eee3fe92eb0889da491d4200. 2024-11-22T18:51:14,896 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1732301382271.fa520453eee3fe92eb0889da491d4200. after waiting 0 ms 2024-11-22T18:51:14,896 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1732301382271.fa520453eee3fe92eb0889da491d4200. 
2024-11-22T18:51:14,896 INFO [RS:0;d79ba0c344fb:34657 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-22T18:51:14,896 INFO [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing fa520453eee3fe92eb0889da491d4200 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-22T18:51:14,896 INFO [RS:0;d79ba0c344fb:34657 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-22T18:51:14,896 DEBUG [RS:0;d79ba0c344fb:34657 {}] regionserver.HRegionServer(1325): Online Regions={fa520453eee3fe92eb0889da491d4200=TestLogRolling-testSlowSyncLogRolling,,1732301382271.fa520453eee3fe92eb0889da491d4200., 1588230740=hbase:meta,,1.1588230740} 2024-11-22T18:51:14,896 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T18:51:14,896 INFO [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T18:51:14,896 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T18:51:14,896 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T18:51:14,896 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T18:51:14,896 DEBUG [RS:0;d79ba0c344fb:34657 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, fa520453eee3fe92eb0889da491d4200 2024-11-22T18:51:14,897 INFO [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-22T18:51:14,902 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/default/TestLogRolling-testSlowSyncLogRolling/fa520453eee3fe92eb0889da491d4200/.tmp/info/5e787c1c03e946eb9879b80202b78ea8 is 1080, key is row0029/info:/1732301468875/Put/seqid=0 2024-11-22T18:51:14,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46333 is added to blk_1073741850_1026 (size=8193) 2024-11-22T18:51:14,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46673 is added to blk_1073741850_1026 (size=8193) 2024-11-22T18:51:14,910 INFO [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/default/TestLogRolling-testSlowSyncLogRolling/fa520453eee3fe92eb0889da491d4200/.tmp/info/5e787c1c03e946eb9879b80202b78ea8 2024-11-22T18:51:14,918 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/hbase/meta/1588230740/.tmp/info/92d70a8f06c0454fb7d33efccdebee5a is 195, key is TestLogRolling-testSlowSyncLogRolling,,1732301382271.fa520453eee3fe92eb0889da491d4200./info:regioninfo/1732301382726/Put/seqid=0 2024-11-22T18:51:14,919 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/default/TestLogRolling-testSlowSyncLogRolling/fa520453eee3fe92eb0889da491d4200/.tmp/info/5e787c1c03e946eb9879b80202b78ea8 as hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/default/TestLogRolling-testSlowSyncLogRolling/fa520453eee3fe92eb0889da491d4200/info/5e787c1c03e946eb9879b80202b78ea8 2024-11-22T18:51:14,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46673 is added to blk_1073741851_1027 (size=7016) 2024-11-22T18:51:14,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46333 is added to blk_1073741851_1027 (size=7016) 2024-11-22T18:51:14,924 INFO [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/hbase/meta/1588230740/.tmp/info/92d70a8f06c0454fb7d33efccdebee5a 2024-11-22T18:51:14,927 INFO [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/default/TestLogRolling-testSlowSyncLogRolling/fa520453eee3fe92eb0889da491d4200/info/5e787c1c03e946eb9879b80202b78ea8, entries=3, sequenceid=48, filesize=8.0 K 2024-11-22T18:51:14,929 INFO [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for fa520453eee3fe92eb0889da491d4200 in 33ms, sequenceid=48, compaction requested=true 2024-11-22T18:51:14,929 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732301382271.fa520453eee3fe92eb0889da491d4200.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/default/TestLogRolling-testSlowSyncLogRolling/fa520453eee3fe92eb0889da491d4200/info/f2469bd225bc4de5915cc2d3269f0770, hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/default/TestLogRolling-testSlowSyncLogRolling/fa520453eee3fe92eb0889da491d4200/info/9b5d75d6de214bfcad2f5a17249239c7, hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/default/TestLogRolling-testSlowSyncLogRolling/fa520453eee3fe92eb0889da491d4200/info/19632b2ef1d54a4791596cc3ba54d262] to archive 2024-11-22T18:51:14,933 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732301382271.fa520453eee3fe92eb0889da491d4200.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-22T18:51:14,936 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732301382271.fa520453eee3fe92eb0889da491d4200.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/default/TestLogRolling-testSlowSyncLogRolling/fa520453eee3fe92eb0889da491d4200/info/f2469bd225bc4de5915cc2d3269f0770 to hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/archive/data/default/TestLogRolling-testSlowSyncLogRolling/fa520453eee3fe92eb0889da491d4200/info/f2469bd225bc4de5915cc2d3269f0770 2024-11-22T18:51:14,938 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732301382271.fa520453eee3fe92eb0889da491d4200.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/default/TestLogRolling-testSlowSyncLogRolling/fa520453eee3fe92eb0889da491d4200/info/9b5d75d6de214bfcad2f5a17249239c7 to hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/archive/data/default/TestLogRolling-testSlowSyncLogRolling/fa520453eee3fe92eb0889da491d4200/info/9b5d75d6de214bfcad2f5a17249239c7 2024-11-22T18:51:14,940 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732301382271.fa520453eee3fe92eb0889da491d4200.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/default/TestLogRolling-testSlowSyncLogRolling/fa520453eee3fe92eb0889da491d4200/info/19632b2ef1d54a4791596cc3ba54d262 to hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/archive/data/default/TestLogRolling-testSlowSyncLogRolling/fa520453eee3fe92eb0889da491d4200/info/19632b2ef1d54a4791596cc3ba54d262 2024-11-22T18:51:14,948 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/hbase/meta/1588230740/.tmp/ns/29ca79b24adc47be9a21779aae2c009b is 43, key is default/ns:d/1732301381993/Put/seqid=0 2024-11-22T18:51:14,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46673 is added to blk_1073741852_1028 (size=5153) 2024-11-22T18:51:14,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46333 is added to blk_1073741852_1028 (size=5153) 2024-11-22T18:51:14,956 INFO [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/hbase/meta/1588230740/.tmp/ns/29ca79b24adc47be9a21779aae2c009b 2024-11-22T18:51:14,957 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732301382271.fa520453eee3fe92eb0889da491d4200.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=d79ba0c344fb:44645 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] 
at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-22T18:51:14,962 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732301382271.fa520453eee3fe92eb0889da491d4200.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [f2469bd225bc4de5915cc2d3269f0770=12509, 9b5d75d6de214bfcad2f5a17249239c7=12509, 19632b2ef1d54a4791596cc3ba54d262=12509] 2024-11-22T18:51:14,968 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/default/TestLogRolling-testSlowSyncLogRolling/fa520453eee3fe92eb0889da491d4200/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-22T18:51:14,971 INFO [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1732301382271.fa520453eee3fe92eb0889da491d4200. 2024-11-22T18:51:14,971 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for fa520453eee3fe92eb0889da491d4200: Waiting for close lock at 1732301474895Running coprocessor pre-close hooks at 1732301474895Disabling compacts and flushes for region at 1732301474895Disabling writes for close at 1732301474896 (+1 ms)Obtaining lock to block concurrent updates at 1732301474896Preparing flush snapshotting stores in fa520453eee3fe92eb0889da491d4200 at 1732301474896Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1732301382271.fa520453eee3fe92eb0889da491d4200., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1732301474896Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1732301382271.fa520453eee3fe92eb0889da491d4200. at 1732301474897 (+1 ms)Flushing fa520453eee3fe92eb0889da491d4200/info: creating writer at 1732301474897Flushing fa520453eee3fe92eb0889da491d4200/info: appending metadata at 1732301474901 (+4 ms)Flushing fa520453eee3fe92eb0889da491d4200/info: closing flushed file at 1732301474901Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2bd94b4e: reopening flushed file at 1732301474918 (+17 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for fa520453eee3fe92eb0889da491d4200 in 33ms, sequenceid=48, compaction requested=true at 1732301474929 (+11 ms)Writing region close event to WAL at 1732301474963 (+34 ms)Running coprocessor post-close hooks at 1732301474969 (+6 ms)Closed at 1732301474971 (+2 ms) 2024-11-22T18:51:14,972 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1732301382271.fa520453eee3fe92eb0889da491d4200. 
2024-11-22T18:51:14,983 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/hbase/meta/1588230740/.tmp/table/1ec86016847b43f485a8acb123f914db is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1732301382741/Put/seqid=0 2024-11-22T18:51:14,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46333 is added to blk_1073741853_1029 (size=5396) 2024-11-22T18:51:14,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46673 is added to blk_1073741853_1029 (size=5396) 2024-11-22T18:51:14,989 INFO [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/hbase/meta/1588230740/.tmp/table/1ec86016847b43f485a8acb123f914db 2024-11-22T18:51:14,997 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/hbase/meta/1588230740/.tmp/info/92d70a8f06c0454fb7d33efccdebee5a as hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/hbase/meta/1588230740/info/92d70a8f06c0454fb7d33efccdebee5a 2024-11-22T18:51:15,004 INFO [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/hbase/meta/1588230740/info/92d70a8f06c0454fb7d33efccdebee5a, entries=10, sequenceid=11, filesize=6.9 K 2024-11-22T18:51:15,005 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/hbase/meta/1588230740/.tmp/ns/29ca79b24adc47be9a21779aae2c009b as hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/hbase/meta/1588230740/ns/29ca79b24adc47be9a21779aae2c009b 2024-11-22T18:51:15,012 INFO [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/hbase/meta/1588230740/ns/29ca79b24adc47be9a21779aae2c009b, entries=2, sequenceid=11, filesize=5.0 K 2024-11-22T18:51:15,013 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/hbase/meta/1588230740/.tmp/table/1ec86016847b43f485a8acb123f914db as hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/hbase/meta/1588230740/table/1ec86016847b43f485a8acb123f914db 2024-11-22T18:51:15,020 INFO [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/hbase/meta/1588230740/table/1ec86016847b43f485a8acb123f914db, entries=2, sequenceid=11, filesize=5.3 K 2024-11-22T18:51:15,022 INFO 
[RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 125ms, sequenceid=11, compaction requested=false 2024-11-22T18:51:15,027 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-22T18:51:15,028 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T18:51:15,028 INFO [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T18:51:15,028 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732301474896Running coprocessor pre-close hooks at 1732301474896Disabling compacts and flushes for region at 1732301474896Disabling writes for close at 1732301474896Obtaining lock to block concurrent updates at 1732301474897 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1732301474897Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1732301474897Flushing stores of hbase:meta,,1.1588230740 at 1732301474898 (+1 ms)Flushing 1588230740/info: creating writer at 1732301474898Flushing 1588230740/info: appending metadata at 1732301474917 (+19 ms)Flushing 1588230740/info: closing flushed file at 1732301474917Flushing 1588230740/ns: creating writer at 1732301474932 (+15 ms)Flushing 1588230740/ns: appending metadata at 1732301474948 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1732301474948Flushing 1588230740/table: creating writer at 1732301474963 (+15 ms)Flushing 1588230740/table: appending metadata at 1732301474982 (+19 ms)Flushing 1588230740/table: closing flushed file at 1732301474982Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6b438e54: reopening flushed file at 1732301474996 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1b040d63: reopening flushed file at 1732301475004 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4447cf69: reopening flushed file at 1732301475012 (+8 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 125ms, sequenceid=11, compaction requested=false at 1732301475022 (+10 ms)Writing region close event to WAL at 1732301475023 (+1 ms)Running coprocessor post-close hooks at 1732301475028 (+5 ms)Closed at 1732301475028 2024-11-22T18:51:15,028 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-22T18:51:15,097 INFO [RS:0;d79ba0c344fb:34657 {}] regionserver.HRegionServer(976): stopping server d79ba0c344fb,34657,1732301380157; all regions closed. 
2024-11-22T18:51:15,098 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:15,098 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:15,099 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:15,099 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:15,099 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:15,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46333 is added to blk_1073741834_1010 (size=3066) 2024-11-22T18:51:15,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46673 is added to blk_1073741834_1010 (size=3066) 2024-11-22T18:51:15,105 DEBUG [RS:0;d79ba0c344fb:34657 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/oldWALs 2024-11-22T18:51:15,106 INFO [RS:0;d79ba0c344fb:34657 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog d79ba0c344fb%2C34657%2C1732301380157.meta:.meta(num 1732301381849) 2024-11-22T18:51:15,106 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:15,106 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:15,106 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:15,106 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:15,106 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:15,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46673 is added to blk_1073741847_1023 (size=12695) 2024-11-22T18:51:15,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46333 is added to blk_1073741847_1023 (size=12695) 2024-11-22T18:51:15,112 DEBUG [RS:0;d79ba0c344fb:34657 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/oldWALs 2024-11-22T18:51:15,112 INFO [RS:0;d79ba0c344fb:34657 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog d79ba0c344fb%2C34657%2C1732301380157:(num 1732301454846) 2024-11-22T18:51:15,112 DEBUG [RS:0;d79ba0c344fb:34657 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T18:51:15,112 INFO [RS:0;d79ba0c344fb:34657 {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T18:51:15,113 INFO [RS:0;d79ba0c344fb:34657 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T18:51:15,113 INFO [RS:0;d79ba0c344fb:34657 {}] hbase.ChoreService(370): Chore service for: regionserver/d79ba0c344fb:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-22T18:51:15,113 INFO [RS:0;d79ba0c344fb:34657 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T18:51:15,113 INFO [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-22T18:51:15,113 INFO [RS:0;d79ba0c344fb:34657 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34657 2024-11-22T18:51:15,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34657-0x10141033e850001, quorum=127.0.0.1:49841, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/d79ba0c344fb,34657,1732301380157 2024-11-22T18:51:15,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44645-0x10141033e850000, quorum=127.0.0.1:49841, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T18:51:15,117 INFO [RS:0;d79ba0c344fb:34657 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T18:51:15,120 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [d79ba0c344fb,34657,1732301380157] 2024-11-22T18:51:15,123 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/d79ba0c344fb,34657,1732301380157 already deleted, retry=false 2024-11-22T18:51:15,123 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; d79ba0c344fb,34657,1732301380157 expired; onlineServers=0 2024-11-22T18:51:15,123 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'd79ba0c344fb,44645,1732301379428' ***** 2024-11-22T18:51:15,123 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-22T18:51:15,123 INFO [M:0;d79ba0c344fb:44645 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T18:51:15,123 INFO [M:0;d79ba0c344fb:44645 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T18:51:15,123 DEBUG [M:0;d79ba0c344fb:44645 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-22T18:51:15,123 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-22T18:51:15,124 DEBUG [M:0;d79ba0c344fb:44645 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-22T18:51:15,124 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster-HFileCleaner.large.0-1732301381129 {}] cleaner.HFileCleaner(306): Exit Thread[master/d79ba0c344fb:0:becomeActiveMaster-HFileCleaner.large.0-1732301381129,5,FailOnTimeoutGroup] 2024-11-22T18:51:15,124 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster-HFileCleaner.small.0-1732301381129 {}] cleaner.HFileCleaner(306): Exit Thread[master/d79ba0c344fb:0:becomeActiveMaster-HFileCleaner.small.0-1732301381129,5,FailOnTimeoutGroup] 2024-11-22T18:51:15,124 INFO [M:0;d79ba0c344fb:44645 {}] hbase.ChoreService(370): Chore service for: master/d79ba0c344fb:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-22T18:51:15,124 INFO [M:0;d79ba0c344fb:44645 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T18:51:15,124 DEBUG [M:0;d79ba0c344fb:44645 {}] master.HMaster(1795): Stopping service threads 2024-11-22T18:51:15,124 INFO [M:0;d79ba0c344fb:44645 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-22T18:51:15,124 INFO [M:0;d79ba0c344fb:44645 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T18:51:15,125 INFO [M:0;d79ba0c344fb:44645 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-22T18:51:15,125 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-22T18:51:15,126 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44645-0x10141033e850000, quorum=127.0.0.1:49841, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-22T18:51:15,126 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44645-0x10141033e850000, quorum=127.0.0.1:49841, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:51:15,126 DEBUG [M:0;d79ba0c344fb:44645 {}] zookeeper.ZKUtil(347): master:44645-0x10141033e850000, quorum=127.0.0.1:49841, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-22T18:51:15,126 WARN [M:0;d79ba0c344fb:44645 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-22T18:51:15,127 INFO [M:0;d79ba0c344fb:44645 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/.lastflushedseqids 2024-11-22T18:51:15,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46333 is added to blk_1073741854_1030 (size=130) 2024-11-22T18:51:15,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46673 is added to blk_1073741854_1030 (size=130) 2024-11-22T18:51:15,139 INFO [M:0;d79ba0c344fb:44645 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-22T18:51:15,139 INFO [M:0;d79ba0c344fb:44645 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-22T18:51:15,140 DEBUG [M:0;d79ba0c344fb:44645 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T18:51:15,140 INFO [M:0;d79ba0c344fb:44645 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T18:51:15,140 DEBUG [M:0;d79ba0c344fb:44645 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T18:51:15,140 DEBUG [M:0;d79ba0c344fb:44645 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T18:51:15,140 DEBUG [M:0;d79ba0c344fb:44645 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T18:51:15,140 INFO [M:0;d79ba0c344fb:44645 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.02 KB heapSize=29.20 KB 2024-11-22T18:51:15,158 DEBUG [M:0;d79ba0c344fb:44645 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b5db61731703457e9a7df011381c3737 is 82, key is hbase:meta,,1/info:regioninfo/1732301381921/Put/seqid=0 2024-11-22T18:51:15,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46673 is added to blk_1073741855_1031 (size=5672) 2024-11-22T18:51:15,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46333 is added to blk_1073741855_1031 (size=5672) 2024-11-22T18:51:15,165 INFO [M:0;d79ba0c344fb:44645 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b5db61731703457e9a7df011381c3737 2024-11-22T18:51:15,189 DEBUG [M:0;d79ba0c344fb:44645 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0a10533335be40fa91a8386d8eff8351 is 766, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732301382748/Put/seqid=0 2024-11-22T18:51:15,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46673 is added to blk_1073741856_1032 (size=6247) 2024-11-22T18:51:15,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46333 is added to blk_1073741856_1032 (size=6247) 2024-11-22T18:51:15,195 INFO [M:0;d79ba0c344fb:44645 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.42 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0a10533335be40fa91a8386d8eff8351 2024-11-22T18:51:15,201 INFO [M:0;d79ba0c344fb:44645 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 0a10533335be40fa91a8386d8eff8351 2024-11-22T18:51:15,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34657-0x10141033e850001, quorum=127.0.0.1:49841, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T18:51:15,220 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34657-0x10141033e850001, quorum=127.0.0.1:49841, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T18:51:15,220 INFO [RS:0;d79ba0c344fb:34657 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T18:51:15,221 DEBUG [M:0;d79ba0c344fb:44645 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3d12f790fadb4101a27003002892ad63 is 69, key is d79ba0c344fb,34657,1732301380157/rs:state/1732301381195/Put/seqid=0 2024-11-22T18:51:15,221 INFO [RS:0;d79ba0c344fb:34657 {}] regionserver.HRegionServer(1031): Exiting; stopping=d79ba0c344fb,34657,1732301380157; zookeeper connection closed. 2024-11-22T18:51:15,221 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3b14f367 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3b14f367 2024-11-22T18:51:15,222 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-22T18:51:15,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46673 is added to blk_1073741857_1033 (size=5156) 2024-11-22T18:51:15,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46333 is added to blk_1073741857_1033 (size=5156) 2024-11-22T18:51:15,227 INFO [M:0;d79ba0c344fb:44645 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3d12f790fadb4101a27003002892ad63 2024-11-22T18:51:15,249 DEBUG [M:0;d79ba0c344fb:44645 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f84f8285d95e4fd4ab26ef905db3c46c is 52, key is load_balancer_on/state:d/1732301382248/Put/seqid=0 2024-11-22T18:51:15,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46673 is added to blk_1073741858_1034 (size=5056) 2024-11-22T18:51:15,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46333 is added to blk_1073741858_1034 (size=5056) 2024-11-22T18:51:15,256 INFO [M:0;d79ba0c344fb:44645 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f84f8285d95e4fd4ab26ef905db3c46c 2024-11-22T18:51:15,263 DEBUG [M:0;d79ba0c344fb:44645 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b5db61731703457e9a7df011381c3737 as hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b5db61731703457e9a7df011381c3737 2024-11-22T18:51:15,269 INFO [M:0;d79ba0c344fb:44645 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b5db61731703457e9a7df011381c3737, entries=8, sequenceid=59, filesize=5.5 K 2024-11-22T18:51:15,270 DEBUG [M:0;d79ba0c344fb:44645 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0a10533335be40fa91a8386d8eff8351 as hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/0a10533335be40fa91a8386d8eff8351 2024-11-22T18:51:15,275 INFO [regionserver/d79ba0c344fb:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T18:51:15,276 INFO [M:0;d79ba0c344fb:44645 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 0a10533335be40fa91a8386d8eff8351 2024-11-22T18:51:15,276 INFO [M:0;d79ba0c344fb:44645 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/0a10533335be40fa91a8386d8eff8351, entries=6, sequenceid=59, filesize=6.1 K 2024-11-22T18:51:15,277 DEBUG [M:0;d79ba0c344fb:44645 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3d12f790fadb4101a27003002892ad63 as hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3d12f790fadb4101a27003002892ad63 2024-11-22T18:51:15,282 INFO [M:0;d79ba0c344fb:44645 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3d12f790fadb4101a27003002892ad63, entries=1, sequenceid=59, filesize=5.0 K 2024-11-22T18:51:15,283 DEBUG [M:0;d79ba0c344fb:44645 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f84f8285d95e4fd4ab26ef905db3c46c as hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/f84f8285d95e4fd4ab26ef905db3c46c 2024-11-22T18:51:15,289 INFO [M:0;d79ba0c344fb:44645 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/f84f8285d95e4fd4ab26ef905db3c46c, entries=1, sequenceid=59, filesize=4.9 K 2024-11-22T18:51:15,291 INFO [M:0;d79ba0c344fb:44645 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 150ms, sequenceid=59, compaction requested=false 2024-11-22T18:51:15,292 INFO [M:0;d79ba0c344fb:44645 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-22T18:51:15,292 DEBUG [M:0;d79ba0c344fb:44645 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732301475140Disabling compacts and flushes for region at 1732301475140Disabling writes for close at 1732301475140Obtaining lock to block concurrent updates at 1732301475140Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732301475140Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23576, getHeapSize=29840, getOffHeapSize=0, getCellsCount=70 at 1732301475141 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732301475141Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732301475142 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732301475157 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732301475158 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732301475172 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732301475188 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732301475188Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732301475202 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732301475220 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732301475220Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732301475233 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732301475249 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732301475249Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@17f153fc: reopening flushed file at 1732301475262 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@54caa3c1: reopening flushed file at 1732301475269 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7b6cd900: reopening flushed file at 1732301475276 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@178921af: reopening flushed file at 1732301475283 (+7 ms)Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 150ms, sequenceid=59, compaction requested=false at 1732301475291 (+8 ms)Writing region close event to WAL at 1732301475292 (+1 ms)Closed at 1732301475292 2024-11-22T18:51:15,293 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:15,293 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:15,293 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:15,294 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:15,294 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:15,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46673 is added to blk_1073741830_1006 (size=27973) 2024-11-22T18:51:15,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46333 is added to blk_1073741830_1006 (size=27973) 2024-11-22T18:51:15,297 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-22T18:51:15,297 INFO [M:0;d79ba0c344fb:44645 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-22T18:51:15,297 INFO [M:0;d79ba0c344fb:44645 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44645 2024-11-22T18:51:15,297 INFO [M:0;d79ba0c344fb:44645 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T18:51:15,399 INFO [M:0;d79ba0c344fb:44645 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T18:51:15,399 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44645-0x10141033e850000, quorum=127.0.0.1:49841, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T18:51:15,400 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44645-0x10141033e850000, quorum=127.0.0.1:49841, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T18:51:15,405 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bf97579{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T18:51:15,408 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T18:51:15,408 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T18:51:15,408 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T18:51:15,409 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/adbe9932-4861-9ece-9fe0-16f5318f8d37/hadoop.log.dir/,STOPPED} 2024-11-22T18:51:15,412 WARN [BP-2061863200-172.17.0.2-1732301376499 heartbeating to localhost/127.0.0.1:44137 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T18:51:15,412 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T18:51:15,412 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T18:51:15,412 WARN [BP-2061863200-172.17.0.2-1732301376499 heartbeating to localhost/127.0.0.1:44137 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2061863200-172.17.0.2-1732301376499 (Datanode Uuid 85117859-41c5-4d03-b7cc-6f4b75054e37) service to localhost/127.0.0.1:44137 2024-11-22T18:51:15,413 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/adbe9932-4861-9ece-9fe0-16f5318f8d37/cluster_6560dfe3-3cd3-7a9f-9ca8-b6dad9a14438/data/data3/current/BP-2061863200-172.17.0.2-1732301376499 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T18:51:15,414 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/adbe9932-4861-9ece-9fe0-16f5318f8d37/cluster_6560dfe3-3cd3-7a9f-9ca8-b6dad9a14438/data/data4/current/BP-2061863200-172.17.0.2-1732301376499 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T18:51:15,414 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T18:51:15,416 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b07d1ba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T18:51:15,417 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T18:51:15,417 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T18:51:15,417 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T18:51:15,417 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/adbe9932-4861-9ece-9fe0-16f5318f8d37/hadoop.log.dir/,STOPPED} 2024-11-22T18:51:15,419 WARN [BP-2061863200-172.17.0.2-1732301376499 heartbeating to localhost/127.0.0.1:44137 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T18:51:15,419 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T18:51:15,419 WARN [BP-2061863200-172.17.0.2-1732301376499 heartbeating to localhost/127.0.0.1:44137 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2061863200-172.17.0.2-1732301376499 (Datanode Uuid 4bddbb0b-1525-4d24-96c5-a71db291950f) service to localhost/127.0.0.1:44137 2024-11-22T18:51:15,419 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T18:51:15,419 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/adbe9932-4861-9ece-9fe0-16f5318f8d37/cluster_6560dfe3-3cd3-7a9f-9ca8-b6dad9a14438/data/data1/current/BP-2061863200-172.17.0.2-1732301376499 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T18:51:15,420 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/adbe9932-4861-9ece-9fe0-16f5318f8d37/cluster_6560dfe3-3cd3-7a9f-9ca8-b6dad9a14438/data/data2/current/BP-2061863200-172.17.0.2-1732301376499 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T18:51:15,420 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T18:51:15,429 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T18:51:15,430 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T18:51:15,430 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T18:51:15,430 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T18:51:15,430 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/adbe9932-4861-9ece-9fe0-16f5318f8d37/hadoop.log.dir/,STOPPED} 2024-11-22T18:51:15,440 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-22T18:51:15,471 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-22T18:51:15,480 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=77 (was 12) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44137 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/d79ba0c344fb:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/d79ba0c344fb:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@3602e124 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44137 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44137 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:44137 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44137 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44137 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:44137 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44137 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: master/d79ba0c344fb:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) - Thread LEAK? -, OpenFileDescriptor=404 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=141 (was 163), ProcessCount=11 (was 11), AvailableMemoryMB=8025 (was 8537) 2024-11-22T18:51:15,488 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=78, OpenFileDescriptor=404, MaxFileDescriptor=1048576, SystemLoadAverage=141, ProcessCount=11, AvailableMemoryMB=8024 2024-11-22T18:51:15,488 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-22T18:51:15,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/adbe9932-4861-9ece-9fe0-16f5318f8d37/hadoop.log.dir so I do NOT create it in target/test-data/60d4b79b-5363-4722-4f6f-e4643caacf25 2024-11-22T18:51:15,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/adbe9932-4861-9ece-9fe0-16f5318f8d37/hadoop.tmp.dir so I do NOT create it in target/test-data/60d4b79b-5363-4722-4f6f-e4643caacf25 2024-11-22T18:51:15,489 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/60d4b79b-5363-4722-4f6f-e4643caacf25/cluster_6789503b-ecbd-083c-0e34-302f6e55b7c3, deleteOnExit=true 2024-11-22T18:51:15,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-22T18:51:15,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/60d4b79b-5363-4722-4f6f-e4643caacf25/test.cache.data in system properties and HBase conf 2024-11-22T18:51:15,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/60d4b79b-5363-4722-4f6f-e4643caacf25/hadoop.tmp.dir in system properties and HBase conf 2024-11-22T18:51:15,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/60d4b79b-5363-4722-4f6f-e4643caacf25/hadoop.log.dir in system properties and HBase conf 2024-11-22T18:51:15,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/60d4b79b-5363-4722-4f6f-e4643caacf25/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-22T18:51:15,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/60d4b79b-5363-4722-4f6f-e4643caacf25/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-22T18:51:15,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-22T18:51:15,489 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-22T18:51:15,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/60d4b79b-5363-4722-4f6f-e4643caacf25/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-22T18:51:15,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/60d4b79b-5363-4722-4f6f-e4643caacf25/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-22T18:51:15,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/60d4b79b-5363-4722-4f6f-e4643caacf25/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-22T18:51:15,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/60d4b79b-5363-4722-4f6f-e4643caacf25/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T18:51:15,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/60d4b79b-5363-4722-4f6f-e4643caacf25/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-22T18:51:15,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/60d4b79b-5363-4722-4f6f-e4643caacf25/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-22T18:51:15,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/60d4b79b-5363-4722-4f6f-e4643caacf25/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T18:51:15,491 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/60d4b79b-5363-4722-4f6f-e4643caacf25/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T18:51:15,491 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/60d4b79b-5363-4722-4f6f-e4643caacf25/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-22T18:51:15,491 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/60d4b79b-5363-4722-4f6f-e4643caacf25/nfs.dump.dir in system properties and HBase conf 2024-11-22T18:51:15,491 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/60d4b79b-5363-4722-4f6f-e4643caacf25/java.io.tmpdir in system properties and HBase conf 2024-11-22T18:51:15,491 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/60d4b79b-5363-4722-4f6f-e4643caacf25/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T18:51:15,491 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/60d4b79b-5363-4722-4f6f-e4643caacf25/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-22T18:51:15,491 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/60d4b79b-5363-4722-4f6f-e4643caacf25/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-22T18:51:15,509 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T18:51:15,593 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T18:51:15,600 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T18:51:15,602 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T18:51:15,602 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T18:51:15,602 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T18:51:15,602 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T18:51:15,603 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@59505eb5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/60d4b79b-5363-4722-4f6f-e4643caacf25/hadoop.log.dir/,AVAILABLE} 2024-11-22T18:51:15,603 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1f681677{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T18:51:15,734 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3b8ef2ff{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/60d4b79b-5363-4722-4f6f-e4643caacf25/java.io.tmpdir/jetty-localhost-38525-hadoop-hdfs-3_4_1-tests_jar-_-any-8484332689038271039/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T18:51:15,734 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@10885b70{HTTP/1.1, (http/1.1)}{localhost:38525} 2024-11-22T18:51:15,734 INFO [Time-limited test {}] server.Server(415): Started @101124ms 2024-11-22T18:51:15,749 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T18:51:15,824 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T18:51:15,828 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T18:51:15,829 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T18:51:15,829 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T18:51:15,829 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T18:51:15,829 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@469dec96{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/60d4b79b-5363-4722-4f6f-e4643caacf25/hadoop.log.dir/,AVAILABLE} 2024-11-22T18:51:15,830 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@eab7acc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T18:51:15,947 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2152d149{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/60d4b79b-5363-4722-4f6f-e4643caacf25/java.io.tmpdir/jetty-localhost-33799-hadoop-hdfs-3_4_1-tests_jar-_-any-18402857437153059712/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T18:51:15,947 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1d281952{HTTP/1.1, (http/1.1)}{localhost:33799} 2024-11-22T18:51:15,947 INFO [Time-limited test {}] server.Server(415): Started @101336ms 2024-11-22T18:51:15,949 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T18:51:15,989 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T18:51:15,993 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T18:51:15,994 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T18:51:15,994 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T18:51:15,994 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T18:51:15,995 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@274298f3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/60d4b79b-5363-4722-4f6f-e4643caacf25/hadoop.log.dir/,AVAILABLE} 2024-11-22T18:51:15,995 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c1be80f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T18:51:16,058 WARN [Thread-439 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/60d4b79b-5363-4722-4f6f-e4643caacf25/cluster_6789503b-ecbd-083c-0e34-302f6e55b7c3/data/data2/current/BP-1742546238-172.17.0.2-1732301475532/current, will proceed with Du for space computation calculation, 2024-11-22T18:51:16,058 WARN [Thread-438 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/60d4b79b-5363-4722-4f6f-e4643caacf25/cluster_6789503b-ecbd-083c-0e34-302f6e55b7c3/data/data1/current/BP-1742546238-172.17.0.2-1732301475532/current, will proceed with Du for space computation calculation, 2024-11-22T18:51:16,076 WARN [Thread-417 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T18:51:16,079 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc3951d2f4090ea92 with lease ID 0xb6f4e7a4387aa089: Processing first storage report for DS-7470dc8c-0483-4c2d-abfa-ebacd9282069 from datanode DatanodeRegistration(127.0.0.1:37221, datanodeUuid=efc6fa0e-7742-466b-a22a-22416a0ef664, infoPort=33067, infoSecurePort=0, ipcPort=41925, storageInfo=lv=-57;cid=testClusterID;nsid=928337153;c=1732301475532) 2024-11-22T18:51:16,079 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc3951d2f4090ea92 with lease ID 0xb6f4e7a4387aa089: from storage DS-7470dc8c-0483-4c2d-abfa-ebacd9282069 node DatanodeRegistration(127.0.0.1:37221, datanodeUuid=efc6fa0e-7742-466b-a22a-22416a0ef664, infoPort=33067, infoSecurePort=0, ipcPort=41925, storageInfo=lv=-57;cid=testClusterID;nsid=928337153;c=1732301475532), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T18:51:16,079 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc3951d2f4090ea92 with lease ID 0xb6f4e7a4387aa089: Processing first storage report for DS-1f4b1823-8542-4884-8205-6b5cf2a420a9 from datanode DatanodeRegistration(127.0.0.1:37221, datanodeUuid=efc6fa0e-7742-466b-a22a-22416a0ef664, infoPort=33067, infoSecurePort=0, ipcPort=41925, storageInfo=lv=-57;cid=testClusterID;nsid=928337153;c=1732301475532) 2024-11-22T18:51:16,079 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc3951d2f4090ea92 with lease ID 0xb6f4e7a4387aa089: from storage DS-1f4b1823-8542-4884-8205-6b5cf2a420a9 node DatanodeRegistration(127.0.0.1:37221, datanodeUuid=efc6fa0e-7742-466b-a22a-22416a0ef664, infoPort=33067, infoSecurePort=0, ipcPort=41925, storageInfo=lv=-57;cid=testClusterID;nsid=928337153;c=1732301475532), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T18:51:16,115 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6838bf55{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/60d4b79b-5363-4722-4f6f-e4643caacf25/java.io.tmpdir/jetty-localhost-40493-hadoop-hdfs-3_4_1-tests_jar-_-any-6523475192413300668/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T18:51:16,116 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@254e0164{HTTP/1.1, (http/1.1)}{localhost:40493} 2024-11-22T18:51:16,116 INFO [Time-limited test {}] server.Server(415): Started @101505ms 2024-11-22T18:51:16,118 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
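Editor's note: the entries up to this point show HBaseTestingUtil pointing every dfs.*, nfs.* and java.io.tmpdir property at a per-test data directory and then bringing up an embedded NameNode plus two DataNodes, each with its own Jetty web UI. A minimal sketch of how a test normally obtains that environment is below; it assumes the HBaseTestingUtil API from the hbase-server test jar (the class is HBaseTestingUtility on 2.x branches and method names can differ), so treat it as an illustration rather than the exact call sequence behind this log.

    import org.apache.hadoop.hbase.HBaseTestingUtil; // HBaseTestingUtility on 2.x branches

    public class MiniDfsStartupSketch {
      public static void main(String[] args) throws Exception {
        // The utility owns a random test-data directory (like the 60d4b79b-... path
        // above); during startup it points dfs.*, nfs.dump.dir and java.io.tmpdir
        // there in both system properties and the HBase conf.
        HBaseTestingUtil util = new HBaseTestingUtil();

        // Tests usually tweak util.getConfiguration() here before startup.

        // Two DataNodes, matching the data1..data4 block-pool slices reported above.
        util.startMiniDFSCluster(2);
        try {
          // ... exercise the embedded HDFS here ...
        } finally {
          util.shutdownMiniDFSCluster();
        }
      }
    }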
2024-11-22T18:51:16,235 WARN [Thread-465 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/60d4b79b-5363-4722-4f6f-e4643caacf25/cluster_6789503b-ecbd-083c-0e34-302f6e55b7c3/data/data4/current/BP-1742546238-172.17.0.2-1732301475532/current, will proceed with Du for space computation calculation, 2024-11-22T18:51:16,235 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/60d4b79b-5363-4722-4f6f-e4643caacf25/cluster_6789503b-ecbd-083c-0e34-302f6e55b7c3/data/data3/current/BP-1742546238-172.17.0.2-1732301475532/current, will proceed with Du for space computation calculation, 2024-11-22T18:51:16,253 WARN [Thread-453 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-22T18:51:16,256 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3c49a25102e9fb09 with lease ID 0xb6f4e7a4387aa08a: Processing first storage report for DS-dcc67336-56a6-4893-a880-67aed226f603 from datanode DatanodeRegistration(127.0.0.1:41849, datanodeUuid=23ceff28-46ae-41ca-8a37-755e38840039, infoPort=40323, infoSecurePort=0, ipcPort=46381, storageInfo=lv=-57;cid=testClusterID;nsid=928337153;c=1732301475532) 2024-11-22T18:51:16,256 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3c49a25102e9fb09 with lease ID 0xb6f4e7a4387aa08a: from storage DS-dcc67336-56a6-4893-a880-67aed226f603 node DatanodeRegistration(127.0.0.1:41849, datanodeUuid=23ceff28-46ae-41ca-8a37-755e38840039, infoPort=40323, infoSecurePort=0, ipcPort=46381, storageInfo=lv=-57;cid=testClusterID;nsid=928337153;c=1732301475532), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T18:51:16,256 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3c49a25102e9fb09 with lease ID 0xb6f4e7a4387aa08a: Processing first storage report for DS-bd56b9d0-2f67-4d3e-a095-832cdcdd50e4 from datanode DatanodeRegistration(127.0.0.1:41849, datanodeUuid=23ceff28-46ae-41ca-8a37-755e38840039, infoPort=40323, infoSecurePort=0, ipcPort=46381, storageInfo=lv=-57;cid=testClusterID;nsid=928337153;c=1732301475532) 2024-11-22T18:51:16,256 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3c49a25102e9fb09 with lease ID 0xb6f4e7a4387aa08a: from storage DS-bd56b9d0-2f67-4d3e-a095-832cdcdd50e4 node DatanodeRegistration(127.0.0.1:41849, datanodeUuid=23ceff28-46ae-41ca-8a37-755e38840039, infoPort=40323, infoSecurePort=0, ipcPort=46381, storageInfo=lv=-57;cid=testClusterID;nsid=928337153;c=1732301475532), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T18:51:16,349 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/60d4b79b-5363-4722-4f6f-e4643caacf25 2024-11-22T18:51:16,352 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/60d4b79b-5363-4722-4f6f-e4643caacf25/cluster_6789503b-ecbd-083c-0e34-302f6e55b7c3/zookeeper_0, clientPort=57625, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/60d4b79b-5363-4722-4f6f-e4643caacf25/cluster_6789503b-ecbd-083c-0e34-302f6e55b7c3/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/60d4b79b-5363-4722-4f6f-e4643caacf25/cluster_6789503b-ecbd-083c-0e34-302f6e55b7c3/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-22T18:51:16,353 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=57625 2024-11-22T18:51:16,353 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T18:51:16,355 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T18:51:16,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37221 is added to blk_1073741825_1001 (size=7) 2024-11-22T18:51:16,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41849 is added to blk_1073741825_1001 (size=7) 2024-11-22T18:51:16,367 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43 with version=8 2024-11-22T18:51:16,367 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/hbase-staging 2024-11-22T18:51:16,369 INFO [Time-limited test {}] client.ConnectionUtils(128): master/d79ba0c344fb:0 server-side Connection retries=45 2024-11-22T18:51:16,370 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T18:51:16,370 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T18:51:16,370 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T18:51:16,370 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T18:51:16,370 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T18:51:16,370 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-22T18:51:16,370 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T18:51:16,371 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35543 2024-11-22T18:51:16,372 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:35543 connecting to ZooKeeper ensemble=127.0.0.1:57625 2024-11-22T18:51:16,378 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:355430x0, quorum=127.0.0.1:57625, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T18:51:16,379 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35543-0x1014104bc8f0000 connected 2024-11-22T18:51:16,402 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T18:51:16,404 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T18:51:16,407 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35543-0x1014104bc8f0000, quorum=127.0.0.1:57625, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T18:51:16,407 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43, hbase.cluster.distributed=false 2024-11-22T18:51:16,409 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35543-0x1014104bc8f0000, quorum=127.0.0.1:57625, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T18:51:16,409 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35543 2024-11-22T18:51:16,412 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35543 2024-11-22T18:51:16,414 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35543 2024-11-22T18:51:16,416 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35543 2024-11-22T18:51:16,416 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35543 2024-11-22T18:51:16,433 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/d79ba0c344fb:0 server-side Connection retries=45 2024-11-22T18:51:16,433 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T18:51:16,433 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T18:51:16,433 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T18:51:16,433 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T18:51:16,433 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T18:51:16,433 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-22T18:51:16,434 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T18:51:16,434 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37771 2024-11-22T18:51:16,436 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37771 connecting to ZooKeeper ensemble=127.0.0.1:57625 2024-11-22T18:51:16,437 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T18:51:16,439 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T18:51:16,444 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:377710x0, quorum=127.0.0.1:57625, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T18:51:16,445 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:377710x0, quorum=127.0.0.1:57625, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T18:51:16,445 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37771-0x1014104bc8f0001 connected 2024-11-22T18:51:16,445 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-22T18:51:16,446 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-22T18:51:16,447 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37771-0x1014104bc8f0001, quorum=127.0.0.1:57625, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-22T18:51:16,448 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37771-0x1014104bc8f0001, quorum=127.0.0.1:57625, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T18:51:16,448 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37771 2024-11-22T18:51:16,449 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37771 2024-11-22T18:51:16,451 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37771 2024-11-22T18:51:16,452 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37771 2024-11-22T18:51:16,455 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37771 2024-11-22T18:51:16,468 
DEBUG [M:0;d79ba0c344fb:35543 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;d79ba0c344fb:35543 2024-11-22T18:51:16,469 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/d79ba0c344fb,35543,1732301476369 2024-11-22T18:51:16,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35543-0x1014104bc8f0000, quorum=127.0.0.1:57625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T18:51:16,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37771-0x1014104bc8f0001, quorum=127.0.0.1:57625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T18:51:16,474 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35543-0x1014104bc8f0000, quorum=127.0.0.1:57625, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/d79ba0c344fb,35543,1732301476369 2024-11-22T18:51:16,476 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35543-0x1014104bc8f0000, quorum=127.0.0.1:57625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:51:16,476 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37771-0x1014104bc8f0001, quorum=127.0.0.1:57625, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-22T18:51:16,476 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37771-0x1014104bc8f0001, quorum=127.0.0.1:57625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:51:16,476 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35543-0x1014104bc8f0000, quorum=127.0.0.1:57625, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-22T18:51:16,477 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/d79ba0c344fb,35543,1732301476369 from backup master directory 2024-11-22T18:51:16,479 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35543-0x1014104bc8f0000, quorum=127.0.0.1:57625, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/d79ba0c344fb,35543,1732301476369 2024-11-22T18:51:16,479 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35543-0x1014104bc8f0000, quorum=127.0.0.1:57625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T18:51:16,479 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37771-0x1014104bc8f0001, quorum=127.0.0.1:57625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T18:51:16,479 WARN [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
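Editor's note: after the MiniZooKeeperCluster comes up on client port 57625, most of the DEBUG chatter above is watchers being registered on znodes that do not exist yet (/hbase/running, /hbase/master, /hbase/acl) so that the master and region server react the moment those nodes appear. The snippet below sketches that pattern with the plain Apache ZooKeeper client: exists() both checks for the node and leaves a one-shot watch that fires on creation. The ensemble string and paths are copied from the log; the rest is an illustrative stand-in, not HBase's ZKWatcher/ZKUtil code.

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class ZnodeWatchSketch {
      public static void main(String[] args) throws Exception {
        CountDownLatch created = new CountDownLatch(1);
        // Ensemble taken from the log; the 30s session timeout is an arbitrary choice.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:57625", 30000, event -> { });

        Watcher watcher = (WatchedEvent event) -> {
          if (event.getType() == Watcher.Event.EventType.NodeCreated
              && "/hbase/running".equals(event.getPath())) {
            created.countDown(); // fires once the active master writes /hbase/running
          }
        };

        // Returns null and registers the watch when the znode does not exist yet,
        // which is exactly the "Set watcher on znode that does not yet exist" case above.
        if (zk.exists("/hbase/running", watcher) != null) {
          created.countDown();
        }
        created.await();
        zk.close();
      }
    }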
2024-11-22T18:51:16,479 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=d79ba0c344fb,35543,1732301476369 2024-11-22T18:51:16,486 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/hbase.id] with ID: 9396c27f-ad4f-4784-822d-f0cde5146644 2024-11-22T18:51:16,486 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/.tmp/hbase.id 2024-11-22T18:51:16,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37221 is added to blk_1073741826_1002 (size=42) 2024-11-22T18:51:16,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41849 is added to blk_1073741826_1002 (size=42) 2024-11-22T18:51:16,493 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/.tmp/hbase.id]:[hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/hbase.id] 2024-11-22T18:51:16,508 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T18:51:16,508 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-22T18:51:16,509 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
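Editor's note: the cluster ID handling above follows the usual HDFS recipe for publishing a small file atomically: write it under a temporary name, then rename it into place, so readers never see a half-written hbase.id. A hedged sketch of that write-then-rename pattern with the stock Hadoop FileSystem API is shown below; the paths and the cluster ID mirror the log, but the helper itself is illustrative and not the FSUtils code the master actually runs.

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class AtomicSmallFileWrite {
      /** Writes content under tmpDir/name, then renames it to dir/name in one step. */
      static void writeAtomically(FileSystem fs, Path dir, Path tmpDir,
                                  String name, String content) throws IOException {
        Path tmp = new Path(tmpDir, name);
        Path target = new Path(dir, name);
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write(content.getBytes(StandardCharsets.UTF_8));
        }
        // rename() is atomic on HDFS, so the file appears fully written or not at all.
        if (!fs.rename(tmp, target)) {
          throw new IOException("rename " + tmp + " -> " + target + " failed");
        }
      }

      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration(); // picks up fs.defaultFS, e.g. hdfs://localhost:38965
        FileSystem fs = FileSystem.get(conf);
        Path root = new Path("/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43");
        writeAtomically(fs, root, new Path(root, ".tmp"),
            "hbase.id", "9396c27f-ad4f-4784-822d-f0cde5146644");
      }
    }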
2024-11-22T18:51:16,513 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35543-0x1014104bc8f0000, quorum=127.0.0.1:57625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:51:16,513 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37771-0x1014104bc8f0001, quorum=127.0.0.1:57625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:51:16,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41849 is added to blk_1073741827_1003 (size=196) 2024-11-22T18:51:16,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37221 is added to blk_1073741827_1003 (size=196) 2024-11-22T18:51:16,523 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T18:51:16,524 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-22T18:51:16,524 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T18:51:16,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37221 is added to blk_1073741828_1004 (size=1189) 2024-11-22T18:51:16,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41849 is added to blk_1073741828_1004 (size=1189) 2024-11-22T18:51:16,536 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/MasterData/data/master/store 2024-11-22T18:51:16,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41849 is added to blk_1073741829_1005 (size=34) 2024-11-22T18:51:16,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37221 is added to blk_1073741829_1005 (size=34) 2024-11-22T18:51:16,545 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T18:51:16,545 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T18:51:16,545 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T18:51:16,545 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T18:51:16,545 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T18:51:16,545 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T18:51:16,545 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
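Editor's note: the 'master:store' descriptor dumped above (four families; ROW_INDEX_V1 encoding, an 8 KB block size, a ROWCOL bloom filter and in-memory caching for 'info'; ROW bloom filters and 64 KB blocks for the others) can be expressed with the public TableDescriptorBuilder/ColumnFamilyDescriptorBuilder API. The sketch below rebuilds just the 'info' and 'proc' families with those attributes, purely to show what the flattened key/value dump corresponds to; it is not the MasterRegion code itself.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
      public static void main(String[] args) {
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                  // VERSIONS => '3'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setInMemory(true)
                .setBlocksize(8 * 1024)                             // BLOCKSIZE => '8192 B (8KB)'
                .build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
                .setMaxVersions(1)                                  // VERSIONS => '1'
                .setBloomFilterType(BloomType.ROW)
                .setBlocksize(64 * 1024)                            // BLOCKSIZE => '65536 B (64KB)'
                .build())
            .build();
        System.out.println(desc);
      }
    }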
2024-11-22T18:51:16,545 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732301476545Disabling compacts and flushes for region at 1732301476545Disabling writes for close at 1732301476545Writing region close event to WAL at 1732301476545Closed at 1732301476545 2024-11-22T18:51:16,546 WARN [master/d79ba0c344fb:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/MasterData/data/master/store/.initializing 2024-11-22T18:51:16,547 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/MasterData/WALs/d79ba0c344fb,35543,1732301476369 2024-11-22T18:51:16,550 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d79ba0c344fb%2C35543%2C1732301476369, suffix=, logDir=hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/MasterData/WALs/d79ba0c344fb,35543,1732301476369, archiveDir=hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/MasterData/oldWALs, maxLogs=10 2024-11-22T18:51:16,550 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor d79ba0c344fb%2C35543%2C1732301476369.1732301476550 2024-11-22T18:51:16,557 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/MasterData/WALs/d79ba0c344fb,35543,1732301476369/d79ba0c344fb%2C35543%2C1732301476369.1732301476550 2024-11-22T18:51:16,560 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40323:40323),(127.0.0.1/127.0.0.1:33067:33067)] 2024-11-22T18:51:16,564 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-22T18:51:16,565 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T18:51:16,565 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:51:16,565 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:51:16,567 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:51:16,568 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-22T18:51:16,569 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:51:16,569 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:51:16,569 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:51:16,571 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-22T18:51:16,571 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:51:16,572 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T18:51:16,572 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:51:16,574 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-22T18:51:16,574 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:51:16,575 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T18:51:16,575 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:51:16,576 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-22T18:51:16,576 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:51:16,577 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T18:51:16,577 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:51:16,578 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:51:16,578 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:51:16,580 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:51:16,580 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:51:16,581 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-22T18:51:16,582 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:51:16,584 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T18:51:16,585 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=823990, jitterRate=0.04775780439376831}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-22T18:51:16,586 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732301476565Initializing all the Stores at 1732301476566 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732301476566Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732301476566Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732301476566Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732301476566Cleaning up temporary data from old regions at 1732301476580 (+14 ms)Region opened successfully at 1732301476586 (+6 ms) 2024-11-22T18:51:16,586 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-22T18:51:16,590 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27b0340c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d79ba0c344fb/172.17.0.2:0 2024-11-22T18:51:16,591 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-22T18:51:16,591 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-22T18:51:16,592 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-22T18:51:16,592 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-22T18:51:16,592 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-22T18:51:16,593 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-22T18:51:16,593 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-22T18:51:16,595 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-22T18:51:16,596 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35543-0x1014104bc8f0000, quorum=127.0.0.1:57625, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-22T18:51:16,599 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-22T18:51:16,599 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-22T18:51:16,600 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35543-0x1014104bc8f0000, quorum=127.0.0.1:57625, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-22T18:51:16,601 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-22T18:51:16,601 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-22T18:51:16,602 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35543-0x1014104bc8f0000, quorum=127.0.0.1:57625, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-22T18:51:16,604 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-22T18:51:16,605 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35543-0x1014104bc8f0000, quorum=127.0.0.1:57625, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-22T18:51:16,606 DEBUG 
[master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-22T18:51:16,608 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35543-0x1014104bc8f0000, quorum=127.0.0.1:57625, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-22T18:51:16,612 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-22T18:51:16,615 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37771-0x1014104bc8f0001, quorum=127.0.0.1:57625, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T18:51:16,615 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35543-0x1014104bc8f0000, quorum=127.0.0.1:57625, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T18:51:16,615 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37771-0x1014104bc8f0001, quorum=127.0.0.1:57625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:51:16,615 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35543-0x1014104bc8f0000, quorum=127.0.0.1:57625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:51:16,616 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=d79ba0c344fb,35543,1732301476369, sessionid=0x1014104bc8f0000, setting cluster-up flag (Was=false) 2024-11-22T18:51:16,619 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35543-0x1014104bc8f0000, quorum=127.0.0.1:57625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:51:16,619 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37771-0x1014104bc8f0001, quorum=127.0.0.1:57625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:51:16,626 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-22T18:51:16,628 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=d79ba0c344fb,35543,1732301476369 2024-11-22T18:51:16,636 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37771-0x1014104bc8f0001, quorum=127.0.0.1:57625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:51:16,636 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35543-0x1014104bc8f0000, quorum=127.0.0.1:57625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:51:16,641 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-22T18:51:16,642 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=d79ba0c344fb,35543,1732301476369 2024-11-22T18:51:16,644 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-22T18:51:16,646 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-22T18:51:16,646 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-22T18:51:16,646 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-22T18:51:16,646 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: d79ba0c344fb,35543,1732301476369 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-22T18:51:16,648 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/d79ba0c344fb:0, corePoolSize=5, maxPoolSize=5 2024-11-22T18:51:16,648 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/d79ba0c344fb:0, corePoolSize=5, maxPoolSize=5 2024-11-22T18:51:16,648 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/d79ba0c344fb:0, corePoolSize=5, maxPoolSize=5 2024-11-22T18:51:16,648 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/d79ba0c344fb:0, corePoolSize=5, maxPoolSize=5 2024-11-22T18:51:16,648 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/d79ba0c344fb:0, corePoolSize=10, maxPoolSize=10 2024-11-22T18:51:16,648 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:51:16,649 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/d79ba0c344fb:0, corePoolSize=2, maxPoolSize=2 2024-11-22T18:51:16,649 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/d79ba0c344fb:0, corePoolSize=1, 
maxPoolSize=1 2024-11-22T18:51:16,650 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732301506650 2024-11-22T18:51:16,650 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-22T18:51:16,650 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-22T18:51:16,650 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-22T18:51:16,650 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-22T18:51:16,650 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-22T18:51:16,650 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-22T18:51:16,651 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T18:51:16,651 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T18:51:16,651 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-22T18:51:16,651 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-22T18:51:16,651 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-22T18:51:16,652 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-22T18:51:16,652 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-22T18:51:16,652 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-22T18:51:16,653 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:51:16,653 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-22T18:51:16,656 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/d79ba0c344fb:0:becomeActiveMaster-HFileCleaner.large.0-1732301476652,5,FailOnTimeoutGroup] 2024-11-22T18:51:16,657 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/d79ba0c344fb:0:becomeActiveMaster-HFileCleaner.small.0-1732301476656,5,FailOnTimeoutGroup] 2024-11-22T18:51:16,657 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T18:51:16,657 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-22T18:51:16,657 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-22T18:51:16,657 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
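Editor's note: the table-descriptor text above (column families info/ns/rep_barrier/table with BLOOMFILTER=ROWCOL, IN_MEMORY=true, DATA_BLOCK_ENCODING=ROW_INDEX_V1, BLOCKSIZE=8192, VERSIONS=3) is produced internally by the master for hbase:meta. As a minimal sketch only, the same attributes can be expressed through HBase's public builder API; the table name "example_table" below is a placeholder, not anything from this log.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public final class DescriptorSketch {
  public static TableDescriptor build() {
    // Column family mirroring the attributes logged for hbase:meta's 'info' family:
    // VERSIONS=3, BLOOMFILTER=ROWCOL, IN_MEMORY=true, DATA_BLOCK_ENCODING=ROW_INDEX_V1, BLOCKSIZE=8192.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBlocksize(8192)
        .build();

    // "example_table" is hypothetical; the master writes hbase:meta's descriptor itself
    // via FSTableDescriptors, as the preceding records show.
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("example_table"))
        .setColumnFamily(info)
        .build();
  }
}
```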
2024-11-22T18:51:16,659 INFO [RS:0;d79ba0c344fb:37771 {}] regionserver.HRegionServer(746): ClusterId : 9396c27f-ad4f-4784-822d-f0cde5146644 2024-11-22T18:51:16,659 DEBUG [RS:0;d79ba0c344fb:37771 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-22T18:51:16,662 DEBUG [RS:0;d79ba0c344fb:37771 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-22T18:51:16,662 DEBUG [RS:0;d79ba0c344fb:37771 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-22T18:51:16,665 DEBUG [RS:0;d79ba0c344fb:37771 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-22T18:51:16,665 DEBUG [RS:0;d79ba0c344fb:37771 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@342d5fc7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d79ba0c344fb/172.17.0.2:0 2024-11-22T18:51:16,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37221 is added to blk_1073741831_1007 (size=1321) 2024-11-22T18:51:16,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41849 is added to blk_1073741831_1007 (size=1321) 2024-11-22T18:51:16,668 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-22T18:51:16,668 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43 2024-11-22T18:51:16,680 DEBUG [RS:0;d79ba0c344fb:37771 {}] regionserver.ShutdownHook(81): 
Installed shutdown hook thread: Shutdownhook:RS:0;d79ba0c344fb:37771 2024-11-22T18:51:16,680 INFO [RS:0;d79ba0c344fb:37771 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-22T18:51:16,680 INFO [RS:0;d79ba0c344fb:37771 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-22T18:51:16,680 DEBUG [RS:0;d79ba0c344fb:37771 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-22T18:51:16,681 INFO [RS:0;d79ba0c344fb:37771 {}] regionserver.HRegionServer(2659): reportForDuty to master=d79ba0c344fb,35543,1732301476369 with port=37771, startcode=1732301476433 2024-11-22T18:51:16,681 DEBUG [RS:0;d79ba0c344fb:37771 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-22T18:51:16,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41849 is added to blk_1073741832_1008 (size=32) 2024-11-22T18:51:16,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37221 is added to blk_1073741832_1008 (size=32) 2024-11-22T18:51:16,683 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T18:51:16,685 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T18:51:16,686 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T18:51:16,687 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:51:16,687 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52097, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-22T18:51:16,687 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:51:16,687 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35543 {}] master.ServerManager(363): Checking decommissioned status of RegionServer d79ba0c344fb,37771,1732301476433 2024-11-22T18:51:16,688 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, 
cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T18:51:16,688 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35543 {}] master.ServerManager(517): Registering regionserver=d79ba0c344fb,37771,1732301476433 2024-11-22T18:51:16,689 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T18:51:16,689 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:51:16,690 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:51:16,690 DEBUG [RS:0;d79ba0c344fb:37771 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43 2024-11-22T18:51:16,690 DEBUG [RS:0;d79ba0c344fb:37771 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38965 2024-11-22T18:51:16,690 DEBUG [RS:0;d79ba0c344fb:37771 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-22T18:51:16,690 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T18:51:16,692 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T18:51:16,692 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:51:16,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35543-0x1014104bc8f0000, quorum=127.0.0.1:57625, baseZNode=/hbase Received 
ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T18:51:16,692 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:51:16,693 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T18:51:16,693 DEBUG [RS:0;d79ba0c344fb:37771 {}] zookeeper.ZKUtil(111): regionserver:37771-0x1014104bc8f0001, quorum=127.0.0.1:57625, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/d79ba0c344fb,37771,1732301476433 2024-11-22T18:51:16,693 WARN [RS:0;d79ba0c344fb:37771 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-22T18:51:16,693 INFO [RS:0;d79ba0c344fb:37771 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T18:51:16,693 DEBUG [RS:0;d79ba0c344fb:37771 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/WALs/d79ba0c344fb,37771,1732301476433 2024-11-22T18:51:16,693 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [d79ba0c344fb,37771,1732301476433] 2024-11-22T18:51:16,695 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T18:51:16,695 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:51:16,695 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:51:16,695 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T18:51:16,696 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/data/hbase/meta/1588230740 2024-11-22T18:51:16,697 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/data/hbase/meta/1588230740 2024-11-22T18:51:16,697 INFO [RS:0;d79ba0c344fb:37771 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing 
regionserver metrics every 5000 milliseconds 2024-11-22T18:51:16,698 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T18:51:16,699 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T18:51:16,699 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T18:51:16,700 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T18:51:16,701 INFO [RS:0;d79ba0c344fb:37771 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-22T18:51:16,702 INFO [RS:0;d79ba0c344fb:37771 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-22T18:51:16,702 INFO [RS:0;d79ba0c344fb:37771 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T18:51:16,704 INFO [RS:0;d79ba0c344fb:37771 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-22T18:51:16,705 INFO [RS:0;d79ba0c344fb:37771 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-22T18:51:16,705 INFO [RS:0;d79ba0c344fb:37771 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-22T18:51:16,705 DEBUG [RS:0;d79ba0c344fb:37771 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:51:16,705 DEBUG [RS:0;d79ba0c344fb:37771 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:51:16,705 DEBUG [RS:0;d79ba0c344fb:37771 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:51:16,706 DEBUG [RS:0;d79ba0c344fb:37771 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:51:16,706 DEBUG [RS:0;d79ba0c344fb:37771 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:51:16,706 DEBUG [RS:0;d79ba0c344fb:37771 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/d79ba0c344fb:0, corePoolSize=2, maxPoolSize=2 2024-11-22T18:51:16,706 DEBUG [RS:0;d79ba0c344fb:37771 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:51:16,706 DEBUG [RS:0;d79ba0c344fb:37771 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:51:16,706 DEBUG [RS:0;d79ba0c344fb:37771 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:51:16,706 DEBUG [RS:0;d79ba0c344fb:37771 {}] 
executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:51:16,706 DEBUG [RS:0;d79ba0c344fb:37771 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:51:16,706 DEBUG [RS:0;d79ba0c344fb:37771 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:51:16,706 DEBUG [RS:0;d79ba0c344fb:37771 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/d79ba0c344fb:0, corePoolSize=3, maxPoolSize=3 2024-11-22T18:51:16,706 DEBUG [RS:0;d79ba0c344fb:37771 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/d79ba0c344fb:0, corePoolSize=3, maxPoolSize=3 2024-11-22T18:51:16,708 INFO [RS:0;d79ba0c344fb:37771 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T18:51:16,708 INFO [RS:0;d79ba0c344fb:37771 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T18:51:16,708 INFO [RS:0;d79ba0c344fb:37771 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T18:51:16,708 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T18:51:16,708 INFO [RS:0;d79ba0c344fb:37771 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-22T18:51:16,708 INFO [RS:0;d79ba0c344fb:37771 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-22T18:51:16,708 INFO [RS:0;d79ba0c344fb:37771 {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,37771,1732301476433-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
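Editor's note: the MemStoreFlusher record a few lines earlier reports globalMemStoreLimit=880 M with a low-water mark of 836 M, i.e. 95% of the limit. A minimal sketch of the two configuration keys that usually govern those numbers (values shown are the common defaults, not settings taken from this test):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class MemStoreLimitsSketch {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    // Fraction of the region server heap reserved for all memstores (commonly 0.4 by default).
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    // Low-water mark as a fraction of the limit above (commonly 0.95 by default);
    // with an 880 MB limit that yields the 836 MB mark reported in the log.
    conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
    return conf;
  }
}
```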
2024-11-22T18:51:16,709 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=800587, jitterRate=0.017999961972236633}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T18:51:16,710 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732301476683Initializing all the Stores at 1732301476684 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732301476684Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732301476684Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732301476684Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732301476684Cleaning up temporary data from old regions at 1732301476699 (+15 ms)Region opened successfully at 1732301476710 (+11 ms) 2024-11-22T18:51:16,710 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T18:51:16,710 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T18:51:16,710 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T18:51:16,710 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T18:51:16,710 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T18:51:16,713 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T18:51:16,713 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732301476710Disabling compacts and flushes for region at 1732301476710Disabling writes for close at 1732301476710Writing region close event to WAL at 1732301476713 (+3 ms)Closed at 1732301476713 2024-11-22T18:51:16,715 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T18:51:16,715 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 
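Editor's note: the region-open record at the start of the line above reports SteppingSplitPolicy wrapping IncreasingToUpperBoundRegionSplitPolicy/ConstantSizeRegionSplitPolicy. A hedged sketch of the knobs typically used to select the split policy and its size bound; the tiny desiredMaxFileSize in the log is a test-scale value, and the 10 GB figure below is the usual default, not taken from this run.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class SplitPolicySketch {
  public static Configuration conf() {
    Configuration conf = HBaseConfiguration.create();
    // Cluster-wide default split policy; SteppingSplitPolicy is what the open record reports.
    conf.set("hbase.regionserver.region.split.policy",
        "org.apache.hadoop.hbase.regionserver.SteppingSplitPolicy");
    // Upper bound consulted by the constant-size / increasing-to-upper-bound policies.
    conf.setLong("hbase.hregion.max.filesize", 10L * 1024 * 1024 * 1024); // 10 GB, common default
    return conf;
  }
}
```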
2024-11-22T18:51:16,715 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-22T18:51:16,717 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T18:51:16,718 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-22T18:51:16,737 INFO [RS:0;d79ba0c344fb:37771 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-22T18:51:16,738 INFO [RS:0;d79ba0c344fb:37771 {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,37771,1732301476433-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T18:51:16,738 INFO [RS:0;d79ba0c344fb:37771 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T18:51:16,738 INFO [RS:0;d79ba0c344fb:37771 {}] regionserver.Replication(171): d79ba0c344fb,37771,1732301476433 started 2024-11-22T18:51:16,754 INFO [RS:0;d79ba0c344fb:37771 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T18:51:16,755 INFO [RS:0;d79ba0c344fb:37771 {}] regionserver.HRegionServer(1482): Serving as d79ba0c344fb,37771,1732301476433, RpcServer on d79ba0c344fb/172.17.0.2:37771, sessionid=0x1014104bc8f0001 2024-11-22T18:51:16,755 DEBUG [RS:0;d79ba0c344fb:37771 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-22T18:51:16,755 DEBUG [RS:0;d79ba0c344fb:37771 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager d79ba0c344fb,37771,1732301476433 2024-11-22T18:51:16,755 DEBUG [RS:0;d79ba0c344fb:37771 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd79ba0c344fb,37771,1732301476433' 2024-11-22T18:51:16,755 DEBUG [RS:0;d79ba0c344fb:37771 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-22T18:51:16,756 DEBUG [RS:0;d79ba0c344fb:37771 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-22T18:51:16,756 DEBUG [RS:0;d79ba0c344fb:37771 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-22T18:51:16,756 DEBUG [RS:0;d79ba0c344fb:37771 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-22T18:51:16,756 DEBUG [RS:0;d79ba0c344fb:37771 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager d79ba0c344fb,37771,1732301476433 2024-11-22T18:51:16,756 DEBUG [RS:0;d79ba0c344fb:37771 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd79ba0c344fb,37771,1732301476433' 2024-11-22T18:51:16,756 DEBUG [RS:0;d79ba0c344fb:37771 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-22T18:51:16,757 DEBUG [RS:0;d79ba0c344fb:37771 {}] 
procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-22T18:51:16,757 DEBUG [RS:0;d79ba0c344fb:37771 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-22T18:51:16,757 INFO [RS:0;d79ba0c344fb:37771 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-22T18:51:16,757 INFO [RS:0;d79ba0c344fb:37771 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-22T18:51:16,860 INFO [RS:0;d79ba0c344fb:37771 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d79ba0c344fb%2C37771%2C1732301476433, suffix=, logDir=hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/WALs/d79ba0c344fb,37771,1732301476433, archiveDir=hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/oldWALs, maxLogs=32 2024-11-22T18:51:16,862 INFO [RS:0;d79ba0c344fb:37771 {}] monitor.StreamSlowMonitor(122): New stream slow monitor d79ba0c344fb%2C37771%2C1732301476433.1732301476862 2024-11-22T18:51:16,868 WARN [d79ba0c344fb:35543 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-22T18:51:16,868 INFO [RS:0;d79ba0c344fb:37771 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/WALs/d79ba0c344fb,37771,1732301476433/d79ba0c344fb%2C37771%2C1732301476433.1732301476862 2024-11-22T18:51:16,869 DEBUG [RS:0;d79ba0c344fb:37771 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40323:40323),(127.0.0.1/127.0.0.1:33067:33067)] 2024-11-22T18:51:17,119 DEBUG [d79ba0c344fb:35543 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-22T18:51:17,119 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=d79ba0c344fb,37771,1732301476433 2024-11-22T18:51:17,121 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as d79ba0c344fb,37771,1732301476433, state=OPENING 2024-11-22T18:51:17,123 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-22T18:51:17,126 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37771-0x1014104bc8f0001, quorum=127.0.0.1:57625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:51:17,126 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35543-0x1014104bc8f0000, quorum=127.0.0.1:57625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:51:17,127 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T18:51:17,127 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T18:51:17,127 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T18:51:17,127 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=d79ba0c344fb,37771,1732301476433}] 2024-11-22T18:51:17,281 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-22T18:51:17,283 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40945, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-22T18:51:17,288 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-22T18:51:17,288 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T18:51:17,294 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d79ba0c344fb%2C37771%2C1732301476433.meta, suffix=.meta, logDir=hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/WALs/d79ba0c344fb,37771,1732301476433, archiveDir=hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/oldWALs, maxLogs=32 2024-11-22T18:51:17,296 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor d79ba0c344fb%2C37771%2C1732301476433.meta.1732301477296.meta 2024-11-22T18:51:17,303 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/WALs/d79ba0c344fb,37771,1732301476433/d79ba0c344fb%2C37771%2C1732301476433.meta.1732301477296.meta 2024-11-22T18:51:17,304 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40323:40323),(127.0.0.1/127.0.0.1:33067:33067)] 2024-11-22T18:51:17,308 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-22T18:51:17,309 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-22T18:51:17,309 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-22T18:51:17,309 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
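Editor's note: the two "WAL configuration" records above report blocksize=256 MB, rollsize=128 MB, maxLogs=32 under FSHLogProvider. A minimal sketch of the configuration keys usually behind those figures, assuming the roll size is derived as blocksize times the roll multiplier (256 MB × 0.5 = 128 MB); the key names are my assumption about how such values are tuned, not settings read from this test.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class WalConfigSketch {
  public static Configuration conf() {
    Configuration conf = HBaseConfiguration.create();
    // "filesystem" selects the classic FSHLog provider named in the records above.
    conf.set("hbase.wal.provider", "filesystem");
    // WAL block size and roll multiplier; 256 MB * 0.5 matches the 128 MB rollsize in the log.
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
    // Cap on un-archived WAL files per region server (maxLogs=32 above).
    conf.setInt("hbase.regionserver.maxlogs", 32);
    return conf;
  }
}
```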
2024-11-22T18:51:17,309 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-22T18:51:17,309 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T18:51:17,309 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-22T18:51:17,309 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-22T18:51:17,312 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T18:51:17,313 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T18:51:17,313 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:51:17,314 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:51:17,314 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T18:51:17,316 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T18:51:17,316 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:51:17,316 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:51:17,317 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T18:51:17,318 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T18:51:17,318 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:51:17,319 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:51:17,319 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T18:51:17,320 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T18:51:17,320 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:51:17,321 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
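Editor's note: the repeated CompactionConfiguration records above list minCompactSize 128 MB, minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, off-peak ratio 5.0, and a 7-day major-compaction period with 0.5 jitter. A hedged sketch of the configuration keys commonly backing those values (the numbers below simply restate what the log reports, which are also the usual defaults):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class CompactionConfigSketch {
  public static Configuration conf() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);      // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);     // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
    // minCompactSize typically defaults to the memstore flush size (128 MB in the log).
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);
    // Major compactions every 7 days with 50% jitter, as reported above.
    conf.setLong("hbase.hregion.majorcompaction", 604800000L);
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
    return conf;
  }
}
```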
2024-11-22T18:51:17,321 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T18:51:17,322 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/data/hbase/meta/1588230740 2024-11-22T18:51:17,324 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/data/hbase/meta/1588230740 2024-11-22T18:51:17,325 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T18:51:17,325 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T18:51:17,326 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T18:51:17,329 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T18:51:17,330 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=710134, jitterRate=-0.09701839089393616}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T18:51:17,330 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-22T18:51:17,332 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732301477310Writing region info on filesystem at 1732301477310Initializing all the Stores at 1732301477311 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732301477311Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732301477311Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732301477311Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732301477311Cleaning up temporary data from old regions at 1732301477326 (+15 ms)Running coprocessor post-open hooks at 1732301477330 (+4 ms)Region opened successfully at 1732301477332 (+2 ms) 2024-11-22T18:51:17,334 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732301477280 2024-11-22T18:51:17,337 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-22T18:51:17,337 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-22T18:51:17,338 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=d79ba0c344fb,37771,1732301476433 2024-11-22T18:51:17,339 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as d79ba0c344fb,37771,1732301476433, state=OPEN 2024-11-22T18:51:17,343 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37771-0x1014104bc8f0001, quorum=127.0.0.1:57625, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T18:51:17,343 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35543-0x1014104bc8f0000, quorum=127.0.0.1:57625, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T18:51:17,343 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=d79ba0c344fb,37771,1732301476433 2024-11-22T18:51:17,343 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T18:51:17,343 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T18:51:17,347 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-22T18:51:17,347 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=d79ba0c344fb,37771,1732301476433 in 216 msec 2024-11-22T18:51:17,350 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-22T18:51:17,350 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 632 msec 2024-11-22T18:51:17,351 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T18:51:17,351 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-22T18:51:17,353 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T18:51:17,353 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=d79ba0c344fb,37771,1732301476433, seqNum=-1] 2024-11-22T18:51:17,353 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T18:51:17,355 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59457, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T18:51:17,363 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 716 msec 2024-11-22T18:51:17,363 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732301477363, completionTime=-1 2024-11-22T18:51:17,363 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-22T18:51:17,363 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-22T18:51:17,365 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-22T18:51:17,366 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732301537366 2024-11-22T18:51:17,366 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732301597366 2024-11-22T18:51:17,366 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-22T18:51:17,366 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,35543,1732301476369-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T18:51:17,366 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,35543,1732301476369-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T18:51:17,366 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,35543,1732301476369-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T18:51:17,366 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-d79ba0c344fb:35543, period=300000, unit=MILLISECONDS is enabled. 
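Editor's note: the BalancerChore registered just above (period=300000 ms) drives the StochasticLoadBalancer whose "Loaded config" record earlier in this log shows slop=0.2, maxSteps=1000000, stepsPerRegion=800 and maxRunningTime=30000. A minimal sketch of the keys usually used to tune it; the key names are an assumption about standard HBase tuning, with values copied from those records.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class BalancerTuningSketch {
  public static Configuration conf() {
    Configuration conf = HBaseConfiguration.create();
    // Values matching the StochasticLoadBalancer "Loaded config" record in this log.
    conf.setFloat("hbase.regions.slop", 0.2f);
    conf.setLong("hbase.master.balancer.stochastic.maxSteps", 1_000_000L);
    conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
    conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
    conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30_000L);
    // How often the BalancerChore runs (300000 ms in the chore registration above).
    conf.setInt("hbase.balancer.period", 300_000);
    return conf;
  }
}
```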
2024-11-22T18:51:17,366 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-22T18:51:17,367 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-22T18:51:17,369 DEBUG [master/d79ba0c344fb:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-22T18:51:17,372 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.893sec 2024-11-22T18:51:17,372 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-22T18:51:17,372 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-22T18:51:17,372 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-22T18:51:17,372 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-22T18:51:17,372 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-22T18:51:17,372 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,35543,1732301476369-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T18:51:17,372 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,35543,1732301476369-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-22T18:51:17,375 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-22T18:51:17,375 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-22T18:51:17,376 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,35543,1732301476369-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-22T18:51:17,459 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ab35378, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T18:51:17,459 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request d79ba0c344fb,35543,-1 for getting cluster id 2024-11-22T18:51:17,459 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T18:51:17,461 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '9396c27f-ad4f-4784-822d-f0cde5146644' 2024-11-22T18:51:17,462 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T18:51:17,462 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "9396c27f-ad4f-4784-822d-f0cde5146644" 2024-11-22T18:51:17,463 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f852b64, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T18:51:17,463 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [d79ba0c344fb,35543,-1] 2024-11-22T18:51:17,463 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T18:51:17,464 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T18:51:17,466 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54932, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T18:51:17,468 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2fa3cb60, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T18:51:17,468 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T18:51:17,469 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=d79ba0c344fb,37771,1732301476433, seqNum=-1] 2024-11-22T18:51:17,470 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T18:51:17,471 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48364, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T18:51:17,474 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=d79ba0c344fb,35543,1732301476369 2024-11-22T18:51:17,475 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T18:51:17,479 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-22T18:51:17,479 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-22T18:51:17,479 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-22T18:51:17,479 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T18:51:17,479 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T18:51:17,480 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T18:51:17,480 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-22T18:51:17,480 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-22T18:51:17,480 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=571199657, stopped=false 2024-11-22T18:51:17,480 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=d79ba0c344fb,35543,1732301476369 2024-11-22T18:51:17,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37771-0x1014104bc8f0001, quorum=127.0.0.1:57625, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T18:51:17,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35543-0x1014104bc8f0000, quorum=127.0.0.1:57625, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T18:51:17,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37771-0x1014104bc8f0001, quorum=127.0.0.1:57625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:51:17,482 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T18:51:17,482 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-22T18:51:17,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35543-0x1014104bc8f0000, quorum=127.0.0.1:57625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:51:17,483 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T18:51:17,483 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T18:51:17,483 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37771-0x1014104bc8f0001, quorum=127.0.0.1:57625, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T18:51:17,483 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35543-0x1014104bc8f0000, quorum=127.0.0.1:57625, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T18:51:17,483 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'd79ba0c344fb,37771,1732301476433' ***** 2024-11-22T18:51:17,483 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-22T18:51:17,484 INFO [RS:0;d79ba0c344fb:37771 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-22T18:51:17,484 INFO [RS:0;d79ba0c344fb:37771 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-22T18:51:17,484 INFO [RS:0;d79ba0c344fb:37771 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-22T18:51:17,484 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-22T18:51:17,484 INFO [RS:0;d79ba0c344fb:37771 {}] regionserver.HRegionServer(959): stopping server d79ba0c344fb,37771,1732301476433 2024-11-22T18:51:17,484 INFO [RS:0;d79ba0c344fb:37771 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T18:51:17,484 INFO [RS:0;d79ba0c344fb:37771 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;d79ba0c344fb:37771. 
2024-11-22T18:51:17,484 DEBUG [RS:0;d79ba0c344fb:37771 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T18:51:17,484 DEBUG [RS:0;d79ba0c344fb:37771 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T18:51:17,484 INFO [RS:0;d79ba0c344fb:37771 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-22T18:51:17,484 INFO [RS:0;d79ba0c344fb:37771 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-22T18:51:17,484 INFO [RS:0;d79ba0c344fb:37771 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-22T18:51:17,484 INFO [RS:0;d79ba0c344fb:37771 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-22T18:51:17,485 INFO [RS:0;d79ba0c344fb:37771 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-22T18:51:17,485 DEBUG [RS:0;d79ba0c344fb:37771 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-22T18:51:17,485 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T18:51:17,485 DEBUG [RS:0;d79ba0c344fb:37771 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-22T18:51:17,485 INFO [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T18:51:17,485 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T18:51:17,485 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T18:51:17,485 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T18:51:17,485 INFO [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-22T18:51:17,509 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/data/hbase/meta/1588230740/.tmp/ns/bd0296ec565c4bfea7dd6deb39346cf5 is 43, key is default/ns:d/1732301477356/Put/seqid=0 2024-11-22T18:51:17,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41849 is added to blk_1073741835_1011 (size=5153) 2024-11-22T18:51:17,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37221 is added to blk_1073741835_1011 (size=5153) 2024-11-22T18:51:17,521 INFO [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/data/hbase/meta/1588230740/.tmp/ns/bd0296ec565c4bfea7dd6deb39346cf5 2024-11-22T18:51:17,531 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/data/hbase/meta/1588230740/.tmp/ns/bd0296ec565c4bfea7dd6deb39346cf5 as hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/data/hbase/meta/1588230740/ns/bd0296ec565c4bfea7dd6deb39346cf5 2024-11-22T18:51:17,543 INFO [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/data/hbase/meta/1588230740/ns/bd0296ec565c4bfea7dd6deb39346cf5, entries=2, sequenceid=6, filesize=5.0 K 2024-11-22T18:51:17,547 INFO 
[RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 62ms, sequenceid=6, compaction requested=false 2024-11-22T18:51:17,547 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-22T18:51:17,555 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-22T18:51:17,556 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T18:51:17,557 INFO [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T18:51:17,557 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732301477485Running coprocessor pre-close hooks at 1732301477485Disabling compacts and flushes for region at 1732301477485Disabling writes for close at 1732301477485Obtaining lock to block concurrent updates at 1732301477485Preparing flush snapshotting stores in 1588230740 at 1732301477485Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732301477486 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732301477487 (+1 ms)Flushing 1588230740/ns: creating writer at 1732301477487Flushing 1588230740/ns: appending metadata at 1732301477508 (+21 ms)Flushing 1588230740/ns: closing flushed file at 1732301477508Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@36bf354: reopening flushed file at 1732301477530 (+22 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 62ms, sequenceid=6, compaction requested=false at 1732301477547 (+17 ms)Writing region close event to WAL at 1732301477550 (+3 ms)Running coprocessor post-close hooks at 1732301477556 (+6 ms)Closed at 1732301477557 (+1 ms) 2024-11-22T18:51:17,557 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-22T18:51:17,685 INFO [RS:0;d79ba0c344fb:37771 {}] regionserver.HRegionServer(976): stopping server d79ba0c344fb,37771,1732301476433; all regions closed. 
2024-11-22T18:51:17,687 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:17,687 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:17,687 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:17,688 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:17,688 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:17,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37221 is added to blk_1073741834_1010 (size=1152) 2024-11-22T18:51:17,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41849 is added to blk_1073741834_1010 (size=1152) 2024-11-22T18:51:17,697 DEBUG [RS:0;d79ba0c344fb:37771 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/oldWALs 2024-11-22T18:51:17,697 INFO [RS:0;d79ba0c344fb:37771 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog d79ba0c344fb%2C37771%2C1732301476433.meta:.meta(num 1732301477296) 2024-11-22T18:51:17,698 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:17,698 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:17,698 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:17,698 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:17,698 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:17,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37221 is added to blk_1073741833_1009 (size=93) 2024-11-22T18:51:17,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41849 is added to blk_1073741833_1009 (size=93) 2024-11-22T18:51:17,703 DEBUG [RS:0;d79ba0c344fb:37771 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/oldWALs 2024-11-22T18:51:17,703 INFO [RS:0;d79ba0c344fb:37771 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog d79ba0c344fb%2C37771%2C1732301476433:(num 1732301476862) 2024-11-22T18:51:17,703 DEBUG [RS:0;d79ba0c344fb:37771 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T18:51:17,703 INFO [RS:0;d79ba0c344fb:37771 {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T18:51:17,703 INFO [RS:0;d79ba0c344fb:37771 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T18:51:17,703 INFO [RS:0;d79ba0c344fb:37771 {}] hbase.ChoreService(370): Chore service for: regionserver/d79ba0c344fb:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-22T18:51:17,703 INFO [RS:0;d79ba0c344fb:37771 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T18:51:17,703 INFO [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-22T18:51:17,704 INFO [RS:0;d79ba0c344fb:37771 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37771 2024-11-22T18:51:17,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37771-0x1014104bc8f0001, quorum=127.0.0.1:57625, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/d79ba0c344fb,37771,1732301476433 2024-11-22T18:51:17,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35543-0x1014104bc8f0000, quorum=127.0.0.1:57625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T18:51:17,707 INFO [RS:0;d79ba0c344fb:37771 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T18:51:17,709 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [d79ba0c344fb,37771,1732301476433] 2024-11-22T18:51:17,710 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/d79ba0c344fb,37771,1732301476433 already deleted, retry=false 2024-11-22T18:51:17,710 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; d79ba0c344fb,37771,1732301476433 expired; onlineServers=0 2024-11-22T18:51:17,710 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'd79ba0c344fb,35543,1732301476369' ***** 2024-11-22T18:51:17,710 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-22T18:51:17,710 INFO [M:0;d79ba0c344fb:35543 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T18:51:17,710 INFO [M:0;d79ba0c344fb:35543 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T18:51:17,710 DEBUG [M:0;d79ba0c344fb:35543 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-22T18:51:17,711 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-22T18:51:17,711 DEBUG [M:0;d79ba0c344fb:35543 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-22T18:51:17,711 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster-HFileCleaner.large.0-1732301476652 {}] cleaner.HFileCleaner(306): Exit Thread[master/d79ba0c344fb:0:becomeActiveMaster-HFileCleaner.large.0-1732301476652,5,FailOnTimeoutGroup] 2024-11-22T18:51:17,711 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster-HFileCleaner.small.0-1732301476656 {}] cleaner.HFileCleaner(306): Exit Thread[master/d79ba0c344fb:0:becomeActiveMaster-HFileCleaner.small.0-1732301476656,5,FailOnTimeoutGroup] 2024-11-22T18:51:17,711 INFO [M:0;d79ba0c344fb:35543 {}] hbase.ChoreService(370): Chore service for: master/d79ba0c344fb:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-22T18:51:17,711 INFO [M:0;d79ba0c344fb:35543 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T18:51:17,711 DEBUG [M:0;d79ba0c344fb:35543 {}] master.HMaster(1795): Stopping service threads 2024-11-22T18:51:17,711 INFO [M:0;d79ba0c344fb:35543 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-22T18:51:17,711 INFO [M:0;d79ba0c344fb:35543 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T18:51:17,711 INFO [M:0;d79ba0c344fb:35543 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-22T18:51:17,711 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-22T18:51:17,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35543-0x1014104bc8f0000, quorum=127.0.0.1:57625, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-22T18:51:17,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35543-0x1014104bc8f0000, quorum=127.0.0.1:57625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:51:17,712 DEBUG [M:0;d79ba0c344fb:35543 {}] zookeeper.ZKUtil(347): master:35543-0x1014104bc8f0000, quorum=127.0.0.1:57625, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-22T18:51:17,712 WARN [M:0;d79ba0c344fb:35543 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-22T18:51:17,713 INFO [M:0;d79ba0c344fb:35543 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/.lastflushedseqids 2024-11-22T18:51:17,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37221 is added to blk_1073741836_1012 (size=99) 2024-11-22T18:51:17,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41849 is added to blk_1073741836_1012 (size=99) 2024-11-22T18:51:17,721 INFO [M:0;d79ba0c344fb:35543 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-22T18:51:17,721 INFO [M:0;d79ba0c344fb:35543 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-22T18:51:17,721 DEBUG [M:0;d79ba0c344fb:35543 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T18:51:17,721 INFO [M:0;d79ba0c344fb:35543 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T18:51:17,721 DEBUG [M:0;d79ba0c344fb:35543 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T18:51:17,721 DEBUG [M:0;d79ba0c344fb:35543 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T18:51:17,721 DEBUG [M:0;d79ba0c344fb:35543 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T18:51:17,721 INFO [M:0;d79ba0c344fb:35543 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-22T18:51:17,747 DEBUG [M:0;d79ba0c344fb:35543 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/10716e716ebb472baf3afd87c47baad1 is 82, key is hbase:meta,,1/info:regioninfo/1732301477338/Put/seqid=0 2024-11-22T18:51:17,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37221 is added to blk_1073741837_1013 (size=5672) 2024-11-22T18:51:17,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41849 is added to blk_1073741837_1013 (size=5672) 2024-11-22T18:51:17,754 INFO [M:0;d79ba0c344fb:35543 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/10716e716ebb472baf3afd87c47baad1 2024-11-22T18:51:17,778 DEBUG [M:0;d79ba0c344fb:35543 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0c1db7d70dd247e882228ce7f0955a34 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732301477362/Put/seqid=0 2024-11-22T18:51:17,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41849 is added to blk_1073741838_1014 (size=5275) 2024-11-22T18:51:17,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37221 is added to blk_1073741838_1014 (size=5275) 2024-11-22T18:51:17,787 INFO [M:0;d79ba0c344fb:35543 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0c1db7d70dd247e882228ce7f0955a34 2024-11-22T18:51:17,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37771-0x1014104bc8f0001, quorum=127.0.0.1:57625, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T18:51:17,809 INFO [RS:0;d79ba0c344fb:37771 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T18:51:17,809 INFO [RS:0;d79ba0c344fb:37771 {}] regionserver.HRegionServer(1031): Exiting; 
stopping=d79ba0c344fb,37771,1732301476433; zookeeper connection closed. 2024-11-22T18:51:17,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37771-0x1014104bc8f0001, quorum=127.0.0.1:57625, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T18:51:17,809 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1a81415b {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1a81415b 2024-11-22T18:51:17,810 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-22T18:51:17,816 DEBUG [M:0;d79ba0c344fb:35543 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/08f5202e4bf543d49eecc09b4e8908ba is 69, key is d79ba0c344fb,37771,1732301476433/rs:state/1732301476688/Put/seqid=0 2024-11-22T18:51:17,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41849 is added to blk_1073741839_1015 (size=5156) 2024-11-22T18:51:17,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37221 is added to blk_1073741839_1015 (size=5156) 2024-11-22T18:51:17,823 INFO [M:0;d79ba0c344fb:35543 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/08f5202e4bf543d49eecc09b4e8908ba 2024-11-22T18:51:17,848 DEBUG [M:0;d79ba0c344fb:35543 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3afdb9d076b94532835216fe34f31f74 is 52, key is load_balancer_on/state:d/1732301477477/Put/seqid=0 2024-11-22T18:51:17,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41849 is added to blk_1073741840_1016 (size=5056) 2024-11-22T18:51:17,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37221 is added to blk_1073741840_1016 (size=5056) 2024-11-22T18:51:17,855 INFO [M:0;d79ba0c344fb:35543 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3afdb9d076b94532835216fe34f31f74 2024-11-22T18:51:17,884 DEBUG [M:0;d79ba0c344fb:35543 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/10716e716ebb472baf3afd87c47baad1 as hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/10716e716ebb472baf3afd87c47baad1 2024-11-22T18:51:17,894 INFO [M:0;d79ba0c344fb:35543 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/10716e716ebb472baf3afd87c47baad1, entries=8, sequenceid=29, filesize=5.5 K 2024-11-22T18:51:17,895 DEBUG [M:0;d79ba0c344fb:35543 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0c1db7d70dd247e882228ce7f0955a34 as hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/0c1db7d70dd247e882228ce7f0955a34 2024-11-22T18:51:17,902 INFO [M:0;d79ba0c344fb:35543 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/0c1db7d70dd247e882228ce7f0955a34, entries=3, sequenceid=29, filesize=5.2 K 2024-11-22T18:51:17,903 DEBUG [M:0;d79ba0c344fb:35543 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/08f5202e4bf543d49eecc09b4e8908ba as hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/08f5202e4bf543d49eecc09b4e8908ba 2024-11-22T18:51:17,910 INFO [M:0;d79ba0c344fb:35543 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/08f5202e4bf543d49eecc09b4e8908ba, entries=1, sequenceid=29, filesize=5.0 K 2024-11-22T18:51:17,912 DEBUG [M:0;d79ba0c344fb:35543 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3afdb9d076b94532835216fe34f31f74 as hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/3afdb9d076b94532835216fe34f31f74 2024-11-22T18:51:17,919 INFO [M:0;d79ba0c344fb:35543 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38965/user/jenkins/test-data/a77dd625-e76a-e9bf-e885-e7cc8b333a43/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/3afdb9d076b94532835216fe34f31f74, entries=1, sequenceid=29, filesize=4.9 K 2024-11-22T18:51:17,921 INFO [M:0;d79ba0c344fb:35543 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 200ms, sequenceid=29, compaction requested=false 2024-11-22T18:51:17,937 INFO [M:0;d79ba0c344fb:35543 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-22T18:51:17,937 DEBUG [M:0;d79ba0c344fb:35543 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732301477721Disabling compacts and flushes for region at 1732301477721Disabling writes for close at 1732301477721Obtaining lock to block concurrent updates at 1732301477721Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732301477721Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1732301477722 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732301477723 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732301477723Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732301477746 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732301477746Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732301477760 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732301477777 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732301477777Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732301477795 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732301477815 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732301477815Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732301477830 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732301477847 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732301477847Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6b49c8c9: reopening flushed file at 1732301477863 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4d69f206: reopening flushed file at 1732301477894 (+31 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@36f99407: reopening flushed file at 1732301477902 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7af647da: reopening flushed file at 1732301477911 (+9 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 200ms, sequenceid=29, compaction requested=false at 1732301477921 (+10 ms)Writing region close event to WAL at 1732301477936 (+15 ms)Closed at 1732301477937 (+1 ms) 2024-11-22T18:51:17,937 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:17,938 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:17,938 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:17,938 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:17,938 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:17,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37221 is added to blk_1073741830_1006 (size=10311) 2024-11-22T18:51:17,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41849 is added to blk_1073741830_1006 (size=10311) 2024-11-22T18:51:17,941 INFO [M:0;d79ba0c344fb:35543 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-22T18:51:17,942 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-22T18:51:17,942 INFO [M:0;d79ba0c344fb:35543 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35543 2024-11-22T18:51:17,942 INFO [M:0;d79ba0c344fb:35543 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T18:51:18,045 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35543-0x1014104bc8f0000, quorum=127.0.0.1:57625, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T18:51:18,045 INFO [M:0;d79ba0c344fb:35543 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T18:51:18,045 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35543-0x1014104bc8f0000, quorum=127.0.0.1:57625, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T18:51:18,048 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6838bf55{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T18:51:18,049 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@254e0164{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T18:51:18,049 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T18:51:18,049 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c1be80f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T18:51:18,049 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@274298f3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/60d4b79b-5363-4722-4f6f-e4643caacf25/hadoop.log.dir/,STOPPED} 2024-11-22T18:51:18,051 WARN [BP-1742546238-172.17.0.2-1732301475532 heartbeating to localhost/127.0.0.1:38965 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T18:51:18,051 WARN [BP-1742546238-172.17.0.2-1732301475532 heartbeating to localhost/127.0.0.1:38965 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1742546238-172.17.0.2-1732301475532 (Datanode Uuid 23ceff28-46ae-41ca-8a37-755e38840039) service to localhost/127.0.0.1:38965 2024-11-22T18:51:18,051 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T18:51:18,051 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T18:51:18,052 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/60d4b79b-5363-4722-4f6f-e4643caacf25/cluster_6789503b-ecbd-083c-0e34-302f6e55b7c3/data/data3/current/BP-1742546238-172.17.0.2-1732301475532 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T18:51:18,052 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/60d4b79b-5363-4722-4f6f-e4643caacf25/cluster_6789503b-ecbd-083c-0e34-302f6e55b7c3/data/data4/current/BP-1742546238-172.17.0.2-1732301475532 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T18:51:18,052 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T18:51:18,054 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2152d149{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T18:51:18,055 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1d281952{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T18:51:18,055 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T18:51:18,055 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@eab7acc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T18:51:18,055 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@469dec96{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/60d4b79b-5363-4722-4f6f-e4643caacf25/hadoop.log.dir/,STOPPED} 2024-11-22T18:51:18,057 WARN [BP-1742546238-172.17.0.2-1732301475532 heartbeating to localhost/127.0.0.1:38965 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T18:51:18,057 WARN [BP-1742546238-172.17.0.2-1732301475532 heartbeating to localhost/127.0.0.1:38965 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1742546238-172.17.0.2-1732301475532 (Datanode Uuid efc6fa0e-7742-466b-a22a-22416a0ef664) service to localhost/127.0.0.1:38965 2024-11-22T18:51:18,057 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/60d4b79b-5363-4722-4f6f-e4643caacf25/cluster_6789503b-ecbd-083c-0e34-302f6e55b7c3/data/data1/current/BP-1742546238-172.17.0.2-1732301475532 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T18:51:18,057 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T18:51:18,058 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T18:51:18,058 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/60d4b79b-5363-4722-4f6f-e4643caacf25/cluster_6789503b-ecbd-083c-0e34-302f6e55b7c3/data/data2/current/BP-1742546238-172.17.0.2-1732301475532 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T18:51:18,058 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T18:51:18,066 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3b8ef2ff{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T18:51:18,066 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@10885b70{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T18:51:18,066 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T18:51:18,067 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1f681677{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T18:51:18,067 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@59505eb5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/60d4b79b-5363-4722-4f6f-e4643caacf25/hadoop.log.dir/,STOPPED} 2024-11-22T18:51:18,073 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-22T18:51:18,095 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-22T18:51:18,095 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-22T18:51:18,095 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/60d4b79b-5363-4722-4f6f-e4643caacf25/hadoop.log.dir so I do NOT create it in target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157 2024-11-22T18:51:18,095 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/60d4b79b-5363-4722-4f6f-e4643caacf25/hadoop.tmp.dir so I do NOT create it in target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157 2024-11-22T18:51:18,095 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50, deleteOnExit=true 2024-11-22T18:51:18,095 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING 
DFS 2024-11-22T18:51:18,095 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/test.cache.data in system properties and HBase conf 2024-11-22T18:51:18,095 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/hadoop.tmp.dir in system properties and HBase conf 2024-11-22T18:51:18,096 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/hadoop.log.dir in system properties and HBase conf 2024-11-22T18:51:18,096 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-22T18:51:18,096 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-22T18:51:18,096 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-22T18:51:18,096 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-22T18:51:18,096 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-22T18:51:18,097 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-22T18:51:18,097 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-22T18:51:18,097 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T18:51:18,097 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-22T18:51:18,097 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-22T18:51:18,097 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T18:51:18,097 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T18:51:18,098 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-22T18:51:18,098 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/nfs.dump.dir in system properties and HBase conf 2024-11-22T18:51:18,098 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/java.io.tmpdir in system properties and HBase conf 2024-11-22T18:51:18,098 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T18:51:18,098 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-22T18:51:18,098 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-22T18:51:18,113 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T18:51:18,192 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T18:51:18,198 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T18:51:18,200 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T18:51:18,200 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T18:51:18,200 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T18:51:18,201 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T18:51:18,201 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@42b52d44{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/hadoop.log.dir/,AVAILABLE} 2024-11-22T18:51:18,202 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2305029e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T18:51:18,319 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@62b96b7c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/java.io.tmpdir/jetty-localhost-41565-hadoop-hdfs-3_4_1-tests_jar-_-any-1914346959019859261/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T18:51:18,320 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2fd186ec{HTTP/1.1, (http/1.1)}{localhost:41565} 2024-11-22T18:51:18,320 INFO [Time-limited test {}] server.Server(415): Started @103709ms 2024-11-22T18:51:18,335 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T18:51:18,419 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T18:51:18,423 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T18:51:18,424 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T18:51:18,424 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T18:51:18,424 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T18:51:18,424 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5a107105{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/hadoop.log.dir/,AVAILABLE} 2024-11-22T18:51:18,425 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@48743db4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T18:51:18,544 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@14d09ab9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/java.io.tmpdir/jetty-localhost-45479-hadoop-hdfs-3_4_1-tests_jar-_-any-8853214978204515405/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T18:51:18,545 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@ee6b493{HTTP/1.1, (http/1.1)}{localhost:45479} 2024-11-22T18:51:18,545 INFO [Time-limited test {}] server.Server(415): Started @103934ms 2024-11-22T18:51:18,547 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T18:51:18,592 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T18:51:18,595 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T18:51:18,596 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T18:51:18,596 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T18:51:18,597 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T18:51:18,597 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7eee535{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/hadoop.log.dir/,AVAILABLE} 2024-11-22T18:51:18,597 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1ec1a06e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T18:51:18,678 WARN [Thread-657 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/data/data1/current/BP-618561351-172.17.0.2-1732301478132/current, will proceed with Du for space computation calculation, 2024-11-22T18:51:18,678 WARN [Thread-658 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/data/data2/current/BP-618561351-172.17.0.2-1732301478132/current, will proceed with Du for space computation calculation, 2024-11-22T18:51:18,710 WARN [Thread-636 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T18:51:18,712 INFO [regionserver/d79ba0c344fb:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T18:51:18,714 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5aa7a3061d75335a with lease ID 0x8d42bec30390705c: Processing first storage report for DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369 from datanode DatanodeRegistration(127.0.0.1:40677, datanodeUuid=e9500cb8-11ed-459d-aef9-7ee7bed7a3fa, infoPort=37721, infoSecurePort=0, ipcPort=43815, storageInfo=lv=-57;cid=testClusterID;nsid=1373837700;c=1732301478132) 2024-11-22T18:51:18,714 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5aa7a3061d75335a with lease ID 0x8d42bec30390705c: from storage DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369 node DatanodeRegistration(127.0.0.1:40677, datanodeUuid=e9500cb8-11ed-459d-aef9-7ee7bed7a3fa, infoPort=37721, infoSecurePort=0, ipcPort=43815, storageInfo=lv=-57;cid=testClusterID;nsid=1373837700;c=1732301478132), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T18:51:18,714 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5aa7a3061d75335a with lease ID 0x8d42bec30390705c: Processing first storage report for DS-d79452f1-ca57-4d0e-a79b-010afc708426 from datanode DatanodeRegistration(127.0.0.1:40677, datanodeUuid=e9500cb8-11ed-459d-aef9-7ee7bed7a3fa, infoPort=37721, infoSecurePort=0, ipcPort=43815, storageInfo=lv=-57;cid=testClusterID;nsid=1373837700;c=1732301478132) 2024-11-22T18:51:18,714 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5aa7a3061d75335a with lease ID 0x8d42bec30390705c: from storage DS-d79452f1-ca57-4d0e-a79b-010afc708426 node DatanodeRegistration(127.0.0.1:40677, datanodeUuid=e9500cb8-11ed-459d-aef9-7ee7bed7a3fa, infoPort=37721, infoSecurePort=0, ipcPort=43815, storageInfo=lv=-57;cid=testClusterID;nsid=1373837700;c=1732301478132), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T18:51:18,726 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7330fb3f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/java.io.tmpdir/jetty-localhost-33997-hadoop-hdfs-3_4_1-tests_jar-_-any-13196709457194438106/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T18:51:18,727 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@20aa2ea7{HTTP/1.1, (http/1.1)}{localhost:33997} 2024-11-22T18:51:18,727 INFO [Time-limited test {}] server.Server(415): Started @104116ms 2024-11-22T18:51:18,728 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
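[annotation] Between the teardown above and these first block reports, the utility logs the option set it restarts with (StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1}). A sketch of starting a cluster of that same shape, assuming StartMiniClusterOption.builder() and the startMiniCluster(option) overload that the "Starting up minicluster with option" line implies:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class RestartWithOptionsSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)     // matches the two DataNodes reporting blocks above
        .numZkServers(1)
        .build();
    util.startMiniCluster(option);   // DFS first, then ZK, then master and regionserver
    util.shutdownMiniCluster();
  }
}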
2024-11-22T18:51:18,840 WARN [Thread-683 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/data/data3/current/BP-618561351-172.17.0.2-1732301478132/current, will proceed with Du for space computation calculation, 2024-11-22T18:51:18,840 WARN [Thread-684 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/data/data4/current/BP-618561351-172.17.0.2-1732301478132/current, will proceed with Du for space computation calculation, 2024-11-22T18:51:18,857 WARN [Thread-672 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-22T18:51:18,860 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcb44d845f07e4576 with lease ID 0x8d42bec30390705d: Processing first storage report for DS-836af7c1-fa42-4527-b4b7-c2fbb82a8f52 from datanode DatanodeRegistration(127.0.0.1:33287, datanodeUuid=cac19669-cbaf-42c7-9aa6-abad3a8bfb0e, infoPort=46397, infoSecurePort=0, ipcPort=40973, storageInfo=lv=-57;cid=testClusterID;nsid=1373837700;c=1732301478132) 2024-11-22T18:51:18,860 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcb44d845f07e4576 with lease ID 0x8d42bec30390705d: from storage DS-836af7c1-fa42-4527-b4b7-c2fbb82a8f52 node DatanodeRegistration(127.0.0.1:33287, datanodeUuid=cac19669-cbaf-42c7-9aa6-abad3a8bfb0e, infoPort=46397, infoSecurePort=0, ipcPort=40973, storageInfo=lv=-57;cid=testClusterID;nsid=1373837700;c=1732301478132), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T18:51:18,860 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcb44d845f07e4576 with lease ID 0x8d42bec30390705d: Processing first storage report for DS-a186da24-99c4-4b41-bd7a-c3e70c8d56cf from datanode DatanodeRegistration(127.0.0.1:33287, datanodeUuid=cac19669-cbaf-42c7-9aa6-abad3a8bfb0e, infoPort=46397, infoSecurePort=0, ipcPort=40973, storageInfo=lv=-57;cid=testClusterID;nsid=1373837700;c=1732301478132) 2024-11-22T18:51:18,860 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcb44d845f07e4576 with lease ID 0x8d42bec30390705d: from storage DS-a186da24-99c4-4b41-bd7a-c3e70c8d56cf node DatanodeRegistration(127.0.0.1:33287, datanodeUuid=cac19669-cbaf-42c7-9aa6-abad3a8bfb0e, infoPort=46397, infoSecurePort=0, ipcPort=40973, storageInfo=lv=-57;cid=testClusterID;nsid=1373837700;c=1732301478132), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T18:51:18,958 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157 2024-11-22T18:51:18,961 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/zookeeper_0, clientPort=59152, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-22T18:51:18,962 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59152 2024-11-22T18:51:18,962 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T18:51:18,964 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T18:51:18,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33287 is added to blk_1073741825_1001 (size=7) 2024-11-22T18:51:18,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40677 is added to blk_1073741825_1001 (size=7) 2024-11-22T18:51:18,975 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e with version=8 2024-11-22T18:51:18,975 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/hbase-staging 2024-11-22T18:51:18,977 INFO [Time-limited test {}] client.ConnectionUtils(128): master/d79ba0c344fb:0 server-side Connection retries=45 2024-11-22T18:51:18,977 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T18:51:18,977 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T18:51:18,977 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T18:51:18,977 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T18:51:18,977 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T18:51:18,978 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-22T18:51:18,978 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T18:51:18,978 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33031 2024-11-22T18:51:18,980 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:33031 connecting to ZooKeeper ensemble=127.0.0.1:59152 2024-11-22T18:51:18,987 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:330310x0, quorum=127.0.0.1:59152, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T18:51:18,987 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33031-0x1014104c6c00000 connected 2024-11-22T18:51:19,010 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T18:51:19,012 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T18:51:19,014 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33031-0x1014104c6c00000, quorum=127.0.0.1:59152, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T18:51:19,014 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e, hbase.cluster.distributed=false 2024-11-22T18:51:19,016 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33031-0x1014104c6c00000, quorum=127.0.0.1:59152, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T18:51:19,016 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33031 2024-11-22T18:51:19,016 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33031 2024-11-22T18:51:19,017 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33031 2024-11-22T18:51:19,017 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33031 2024-11-22T18:51:19,017 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33031 2024-11-22T18:51:19,035 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/d79ba0c344fb:0 server-side Connection retries=45 2024-11-22T18:51:19,035 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T18:51:19,036 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T18:51:19,036 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T18:51:19,036 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T18:51:19,036 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T18:51:19,036 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-22T18:51:19,036 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T18:51:19,037 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42437 2024-11-22T18:51:19,038 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42437 connecting to ZooKeeper ensemble=127.0.0.1:59152 2024-11-22T18:51:19,039 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T18:51:19,041 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T18:51:19,046 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:424370x0, quorum=127.0.0.1:59152, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T18:51:19,047 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:424370x0, quorum=127.0.0.1:59152, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T18:51:19,047 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42437-0x1014104c6c00001 connected 2024-11-22T18:51:19,047 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-22T18:51:19,049 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-22T18:51:19,050 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42437-0x1014104c6c00001, quorum=127.0.0.1:59152, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-22T18:51:19,051 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42437-0x1014104c6c00001, quorum=127.0.0.1:59152, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T18:51:19,052 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42437 2024-11-22T18:51:19,053 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42437 2024-11-22T18:51:19,054 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42437 2024-11-22T18:51:19,057 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42437 2024-11-22T18:51:19,060 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42437 2024-11-22T18:51:19,073 
DEBUG [M:0;d79ba0c344fb:33031 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;d79ba0c344fb:33031 2024-11-22T18:51:19,073 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/d79ba0c344fb,33031,1732301478977 2024-11-22T18:51:19,075 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33031-0x1014104c6c00000, quorum=127.0.0.1:59152, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T18:51:19,075 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42437-0x1014104c6c00001, quorum=127.0.0.1:59152, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T18:51:19,076 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33031-0x1014104c6c00000, quorum=127.0.0.1:59152, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/d79ba0c344fb,33031,1732301478977 2024-11-22T18:51:19,079 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42437-0x1014104c6c00001, quorum=127.0.0.1:59152, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-22T18:51:19,079 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42437-0x1014104c6c00001, quorum=127.0.0.1:59152, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:51:19,079 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33031-0x1014104c6c00000, quorum=127.0.0.1:59152, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:51:19,079 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33031-0x1014104c6c00000, quorum=127.0.0.1:59152, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-22T18:51:19,080 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/d79ba0c344fb,33031,1732301478977 from backup master directory 2024-11-22T18:51:19,082 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33031-0x1014104c6c00000, quorum=127.0.0.1:59152, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/d79ba0c344fb,33031,1732301478977 2024-11-22T18:51:19,082 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42437-0x1014104c6c00001, quorum=127.0.0.1:59152, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T18:51:19,082 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33031-0x1014104c6c00000, quorum=127.0.0.1:59152, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T18:51:19,082 WARN [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
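[annotation] The ZKWatcher events above track the master promoting itself: it registers under /hbase/backup-masters, takes /hbase/master, then deletes its own backup-masters entry. An illustrative way to peek at those same znodes from outside the process, using the stock ZooKeeper client rather than HBase's internal ZKWatcher (the quorum string 127.0.0.1:59152 is the one this log reports; printing only the data length avoids assuming the znode's encoding):

import java.util.List;
import org.apache.zookeeper.ZooKeeper;

public class ZnodePeekSketch {
  public static void main(String[] args) throws Exception {
    // Plain ZooKeeper client; no watcher callback needed for a one-shot read.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:59152", 30000, event -> { });
    try {
      byte[] master = zk.getData("/hbase/master", false, null);
      List<String> backups = zk.getChildren("/hbase/backup-masters", false);
      System.out.println("/hbase/master data bytes = " + master.length);
      System.out.println("/hbase/backup-masters children = " + backups);
    } finally {
      zk.close();
    }
  }
}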
2024-11-22T18:51:19,082 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=d79ba0c344fb,33031,1732301478977 2024-11-22T18:51:19,087 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/hbase.id] with ID: 7c3902c7-3f45-4562-bf97-ab3938d612f8 2024-11-22T18:51:19,087 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/.tmp/hbase.id 2024-11-22T18:51:19,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33287 is added to blk_1073741826_1002 (size=42) 2024-11-22T18:51:19,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40677 is added to blk_1073741826_1002 (size=42) 2024-11-22T18:51:19,097 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/.tmp/hbase.id]:[hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/hbase.id] 2024-11-22T18:51:19,110 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T18:51:19,111 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-22T18:51:19,112 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
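[annotation] Once "Registered as active master" is logged and the hbase.id cluster ID file has been moved into place, the same information becomes visible through the client API. A hypothetical client-side check (illustrative only; in a test the Configuration would normally come from the testing utility rather than HBaseConfiguration.create()):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ActiveMasterCheckSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();  // or util.getConfiguration() in a test
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      ClusterMetrics metrics = admin.getClusterMetrics();
      System.out.println("active master  = " + metrics.getMasterName());
      System.out.println("backup masters = " + metrics.getBackupMasterNames());
    }
  }
}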
2024-11-22T18:51:19,116 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33031-0x1014104c6c00000, quorum=127.0.0.1:59152, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:51:19,116 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42437-0x1014104c6c00001, quorum=127.0.0.1:59152, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:51:19,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33287 is added to blk_1073741827_1003 (size=196) 2024-11-22T18:51:19,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40677 is added to blk_1073741827_1003 (size=196) 2024-11-22T18:51:19,126 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T18:51:19,127 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-22T18:51:19,127 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T18:51:19,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40677 is added to blk_1073741828_1004 (size=1189) 2024-11-22T18:51:19,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33287 is added to blk_1073741828_1004 (size=1189) 2024-11-22T18:51:19,140 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/MasterData/data/master/store 2024-11-22T18:51:19,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33287 is added to blk_1073741829_1005 (size=34) 2024-11-22T18:51:19,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40677 is added to blk_1073741829_1005 (size=34) 2024-11-22T18:51:19,147 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T18:51:19,147 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T18:51:19,147 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T18:51:19,147 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T18:51:19,148 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T18:51:19,148 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T18:51:19,148 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
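[annotation] The descriptor dumped above for the local 'master:store' region lists four families (info, proc, rs, state) with per-family settings such as ROW_INDEX_V1 encoding, a ROWCOL bloom filter and an 8 KB block size for 'info'. As a reading aid only, here is how the first two families map onto the public descriptor builders; the master builds this region internally, not through this API:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setBlocksize(8 * 1024)            // 8 KB, as logged for 'info'
            .build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .setBlocksize(64 * 1024)           // 64 KB, as logged for 'proc'
            .build())
        .build();
  }
}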
2024-11-22T18:51:19,148 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732301479147Disabling compacts and flushes for region at 1732301479147Disabling writes for close at 1732301479148 (+1 ms)Writing region close event to WAL at 1732301479148Closed at 1732301479148 2024-11-22T18:51:19,149 WARN [master/d79ba0c344fb:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/MasterData/data/master/store/.initializing 2024-11-22T18:51:19,149 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/MasterData/WALs/d79ba0c344fb,33031,1732301478977 2024-11-22T18:51:19,152 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d79ba0c344fb%2C33031%2C1732301478977, suffix=, logDir=hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/MasterData/WALs/d79ba0c344fb,33031,1732301478977, archiveDir=hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/MasterData/oldWALs, maxLogs=10 2024-11-22T18:51:19,152 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor d79ba0c344fb%2C33031%2C1732301478977.1732301479152 2024-11-22T18:51:19,158 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/MasterData/WALs/d79ba0c344fb,33031,1732301478977/d79ba0c344fb%2C33031%2C1732301478977.1732301479152 2024-11-22T18:51:19,160 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46397:46397),(127.0.0.1/127.0.0.1:37721:37721)] 2024-11-22T18:51:19,161 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-22T18:51:19,161 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T18:51:19,162 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:51:19,162 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:51:19,163 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:51:19,165 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-22T18:51:19,165 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:51:19,165 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:51:19,166 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:51:19,167 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-22T18:51:19,167 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:51:19,167 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T18:51:19,168 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:51:19,169 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-22T18:51:19,169 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:51:19,169 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T18:51:19,170 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:51:19,171 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-22T18:51:19,171 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:51:19,171 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T18:51:19,172 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:51:19,173 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:51:19,173 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:51:19,174 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:51:19,174 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:51:19,175 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-22T18:51:19,176 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:51:19,179 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T18:51:19,179 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=860023, jitterRate=0.09357619285583496}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-22T18:51:19,181 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732301479162Initializing all the Stores at 1732301479163 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732301479163Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732301479163Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732301479163Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732301479163Cleaning up temporary data from old regions at 1732301479174 (+11 ms)Region opened successfully at 1732301479180 (+6 ms) 2024-11-22T18:51:19,181 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-22T18:51:19,185 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2305757d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d79ba0c344fb/172.17.0.2:0 2024-11-22T18:51:19,186 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-22T18:51:19,186 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-22T18:51:19,187 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-22T18:51:19,187 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-22T18:51:19,187 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-22T18:51:19,188 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-22T18:51:19,188 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-22T18:51:19,191 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-22T18:51:19,192 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33031-0x1014104c6c00000, quorum=127.0.0.1:59152, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-22T18:51:19,193 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-22T18:51:19,194 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-22T18:51:19,194 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33031-0x1014104c6c00000, quorum=127.0.0.1:59152, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-22T18:51:19,195 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-22T18:51:19,196 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-22T18:51:19,197 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33031-0x1014104c6c00000, quorum=127.0.0.1:59152, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-22T18:51:19,200 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-22T18:51:19,200 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33031-0x1014104c6c00000, quorum=127.0.0.1:59152, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-22T18:51:19,202 DEBUG 
[master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-22T18:51:19,204 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33031-0x1014104c6c00000, quorum=127.0.0.1:59152, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-22T18:51:19,206 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-22T18:51:19,208 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33031-0x1014104c6c00000, quorum=127.0.0.1:59152, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T18:51:19,208 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42437-0x1014104c6c00001, quorum=127.0.0.1:59152, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T18:51:19,208 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42437-0x1014104c6c00001, quorum=127.0.0.1:59152, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:51:19,208 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33031-0x1014104c6c00000, quorum=127.0.0.1:59152, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:51:19,209 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=d79ba0c344fb,33031,1732301478977, sessionid=0x1014104c6c00000, setting cluster-up flag (Was=false) 2024-11-22T18:51:19,213 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42437-0x1014104c6c00001, quorum=127.0.0.1:59152, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:51:19,214 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33031-0x1014104c6c00000, quorum=127.0.0.1:59152, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:51:19,225 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-22T18:51:19,226 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=d79ba0c344fb,33031,1732301478977 2024-11-22T18:51:19,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33031-0x1014104c6c00000, quorum=127.0.0.1:59152, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:51:19,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42437-0x1014104c6c00001, quorum=127.0.0.1:59152, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:51:19,237 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-22T18:51:19,238 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=d79ba0c344fb,33031,1732301478977 2024-11-22T18:51:19,240 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-22T18:51:19,242 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-22T18:51:19,242 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-22T18:51:19,242 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-22T18:51:19,243 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: d79ba0c344fb,33031,1732301478977 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-22T18:51:19,244 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/d79ba0c344fb:0, corePoolSize=5, maxPoolSize=5 2024-11-22T18:51:19,244 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/d79ba0c344fb:0, corePoolSize=5, maxPoolSize=5 2024-11-22T18:51:19,245 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/d79ba0c344fb:0, corePoolSize=5, maxPoolSize=5 2024-11-22T18:51:19,245 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/d79ba0c344fb:0, corePoolSize=5, maxPoolSize=5 2024-11-22T18:51:19,245 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/d79ba0c344fb:0, corePoolSize=10, maxPoolSize=10 2024-11-22T18:51:19,245 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:51:19,245 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/d79ba0c344fb:0, corePoolSize=2, maxPoolSize=2 2024-11-22T18:51:19,245 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/d79ba0c344fb:0, corePoolSize=1, 
maxPoolSize=1 2024-11-22T18:51:19,247 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732301509247 2024-11-22T18:51:19,247 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-22T18:51:19,247 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-22T18:51:19,247 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-22T18:51:19,248 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-22T18:51:19,248 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-22T18:51:19,248 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-22T18:51:19,248 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T18:51:19,248 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-22T18:51:19,248 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
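Annotation: the entries above show the master wiring up its cleaner chores (TimeToLiveLogCleaner, ReplicationLogCleaner, the LogsCleaner chore at a 600000 ms period). For reference, a minimal sketch of how those cleaner delegates and the chore interval are typically supplied through the site Configuration; the property names ("hbase.master.logcleaner.plugins", "hbase.master.cleaner.interval") are the standard HBase keys and are an assumption on my part, since the log only reports the resolved classes and period.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CleanerChoreConfigSketch {
      public static void main(String[] args) {
        // Start from hbase-default.xml / hbase-site.xml on the classpath.
        Configuration conf = HBaseConfiguration.create();

        // Comma-separated cleaner delegates run by the master's log cleaner chore;
        // the class names are taken from the log entries above.
        conf.set("hbase.master.logcleaner.plugins",
            "org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner,"
          + "org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner");

        // Chore period in milliseconds; 600000 matches the
        // "ScheduledChore name=LogsCleaner, period=600000" entry above.
        conf.setInt("hbase.master.cleaner.interval", 600000);

        System.out.println("logcleaner plugins = "
            + conf.get("hbase.master.logcleaner.plugins"));
      }
    }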
2024-11-22T18:51:19,249 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:51:19,249 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-22T18:51:19,252 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-22T18:51:19,252 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-22T18:51:19,253 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-22T18:51:19,256 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-22T18:51:19,256 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-22T18:51:19,256 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/d79ba0c344fb:0:becomeActiveMaster-HFileCleaner.large.0-1732301479256,5,FailOnTimeoutGroup] 2024-11-22T18:51:19,257 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/d79ba0c344fb:0:becomeActiveMaster-HFileCleaner.small.0-1732301479256,5,FailOnTimeoutGroup] 2024-11-22T18:51:19,260 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T18:51:19,260 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-22T18:51:19,260 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-22T18:51:19,260 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-22T18:51:19,262 INFO [RS:0;d79ba0c344fb:42437 {}] regionserver.HRegionServer(746): ClusterId : 7c3902c7-3f45-4562-bf97-ab3938d612f8 2024-11-22T18:51:19,262 DEBUG [RS:0;d79ba0c344fb:42437 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-22T18:51:19,266 DEBUG [RS:0;d79ba0c344fb:42437 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-22T18:51:19,266 DEBUG [RS:0;d79ba0c344fb:42437 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-22T18:51:19,269 DEBUG [RS:0;d79ba0c344fb:42437 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-22T18:51:19,270 DEBUG [RS:0;d79ba0c344fb:42437 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e5cb49c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d79ba0c344fb/172.17.0.2:0 2024-11-22T18:51:19,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40677 is added to blk_1073741831_1007 (size=1321) 2024-11-22T18:51:19,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33287 is added to blk_1073741831_1007 (size=1321) 2024-11-22T18:51:19,287 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-22T18:51:19,287 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e 2024-11-22T18:51:19,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40677 is added to blk_1073741832_1008 (size=32) 2024-11-22T18:51:19,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33287 is added to blk_1073741832_1008 (size=32) 2024-11-22T18:51:19,299 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T18:51:19,300 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T18:51:19,301 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T18:51:19,301 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:51:19,302 DEBUG [RS:0;d79ba0c344fb:42437 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;d79ba0c344fb:42437 2024-11-22T18:51:19,302 INFO [RS:0;d79ba0c344fb:42437 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-22T18:51:19,302 INFO [RS:0;d79ba0c344fb:42437 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-22T18:51:19,302 DEBUG [RS:0;d79ba0c344fb:42437 {}] regionserver.HRegionServer(832): About to register with Master. 
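Annotation: the hbase:meta table-descriptor entries above list the column-family attributes in play (VERSIONS=3, BLOOMFILTER=ROWCOL, DATA_BLOCK_ENCODING=ROW_INDEX_V1, IN_MEMORY=true, BLOCKSIZE=8192). A minimal sketch of building an equivalent family with the public client API; the table name "example" and the descriptor itself are illustrative only, not something created by this test run.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeFamilySketch {
      public static void main(String[] args) {
        // Mirror the 'info' family attributes reported in the log above.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setInMemory(true)
            .setBlocksize(8192)
            .build();

        // Attach the family to a table descriptor ("example" is a placeholder name).
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example"))
            .setColumnFamily(info)
            .build();

        System.out.println(td);
      }
    }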
2024-11-22T18:51:19,302 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:51:19,302 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T18:51:19,303 INFO [RS:0;d79ba0c344fb:42437 {}] regionserver.HRegionServer(2659): reportForDuty to master=d79ba0c344fb,33031,1732301478977 with port=42437, startcode=1732301479035 2024-11-22T18:51:19,303 DEBUG [RS:0;d79ba0c344fb:42437 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-22T18:51:19,304 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T18:51:19,304 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:51:19,304 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:51:19,304 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T18:51:19,306 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45309, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-22T18:51:19,306 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T18:51:19,306 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:51:19,306 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33031 {}] master.ServerManager(363): Checking decommissioned status of RegionServer d79ba0c344fb,42437,1732301479035 2024-11-22T18:51:19,306 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33031 {}] master.ServerManager(517): Registering regionserver=d79ba0c344fb,42437,1732301479035 2024-11-22T18:51:19,306 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:51:19,307 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T18:51:19,308 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T18:51:19,308 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:51:19,309 DEBUG [RS:0;d79ba0c344fb:42437 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e 2024-11-22T18:51:19,309 DEBUG [RS:0;d79ba0c344fb:42437 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44035 2024-11-22T18:51:19,309 DEBUG [RS:0;d79ba0c344fb:42437 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-22T18:51:19,309 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:51:19,309 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T18:51:19,310 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/hbase/meta/1588230740 2024-11-22T18:51:19,310 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/hbase/meta/1588230740 2024-11-22T18:51:19,312 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T18:51:19,312 DEBUG 
[PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T18:51:19,313 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T18:51:19,313 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33031-0x1014104c6c00000, quorum=127.0.0.1:59152, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T18:51:19,314 DEBUG [RS:0;d79ba0c344fb:42437 {}] zookeeper.ZKUtil(111): regionserver:42437-0x1014104c6c00001, quorum=127.0.0.1:59152, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/d79ba0c344fb,42437,1732301479035 2024-11-22T18:51:19,314 WARN [RS:0;d79ba0c344fb:42437 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-22T18:51:19,314 INFO [RS:0;d79ba0c344fb:42437 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T18:51:19,314 DEBUG [RS:0;d79ba0c344fb:42437 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035 2024-11-22T18:51:19,314 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T18:51:19,317 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [d79ba0c344fb,42437,1732301479035] 2024-11-22T18:51:19,317 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T18:51:19,318 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=839429, jitterRate=0.06738923490047455}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T18:51:19,319 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732301479299Initializing all the Stores at 1732301479300 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732301479300Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732301479300Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 
'true', BLOCKSIZE => '65536 B (64KB)'} at 1732301479300Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732301479300Cleaning up temporary data from old regions at 1732301479312 (+12 ms)Region opened successfully at 1732301479319 (+7 ms) 2024-11-22T18:51:19,319 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T18:51:19,319 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T18:51:19,319 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T18:51:19,319 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T18:51:19,319 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T18:51:19,320 INFO [RS:0;d79ba0c344fb:42437 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-22T18:51:19,320 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T18:51:19,320 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732301479319Disabling compacts and flushes for region at 1732301479319Disabling writes for close at 1732301479319Writing region close event to WAL at 1732301479320 (+1 ms)Closed at 1732301479320 2024-11-22T18:51:19,322 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T18:51:19,322 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-22T18:51:19,322 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-22T18:51:19,324 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T18:51:19,326 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-22T18:51:19,328 INFO [RS:0;d79ba0c344fb:42437 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-22T18:51:19,329 INFO [RS:0;d79ba0c344fb:42437 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-22T18:51:19,329 INFO [RS:0;d79ba0c344fb:42437 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
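Annotation: the region-server startup entries above report the resolved global memstore limit (880 M, low-water mark 836 M) and the pressure-aware compaction throughput bounds (100 MB/s upper, 50 MB/s lower). A minimal sketch of the corresponding Configuration knobs; the property names are the usual HBase 2.x keys and are assumed here, since the log prints only the resolved values.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreAndThroughputSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Fraction of the region-server heap usable by all memstores;
        // the log above prints the absolute value (880 M) resolved for this run.
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);

        // Compaction throughput bounds in bytes/second; 100 MB/s and 50 MB/s
        // match the PressureAwareCompactionThroughputController values above.
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);

        System.out.println("memstore fraction = "
            + conf.getFloat("hbase.regionserver.global.memstore.size", 0.4f));
      }
    }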
2024-11-22T18:51:19,334 INFO [RS:0;d79ba0c344fb:42437 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-22T18:51:19,335 INFO [RS:0;d79ba0c344fb:42437 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-22T18:51:19,335 INFO [RS:0;d79ba0c344fb:42437 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-22T18:51:19,335 DEBUG [RS:0;d79ba0c344fb:42437 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:51:19,336 DEBUG [RS:0;d79ba0c344fb:42437 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:51:19,336 DEBUG [RS:0;d79ba0c344fb:42437 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:51:19,336 DEBUG [RS:0;d79ba0c344fb:42437 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:51:19,336 DEBUG [RS:0;d79ba0c344fb:42437 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:51:19,336 DEBUG [RS:0;d79ba0c344fb:42437 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/d79ba0c344fb:0, corePoolSize=2, maxPoolSize=2 2024-11-22T18:51:19,336 DEBUG [RS:0;d79ba0c344fb:42437 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:51:19,336 DEBUG [RS:0;d79ba0c344fb:42437 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:51:19,336 DEBUG [RS:0;d79ba0c344fb:42437 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:51:19,336 DEBUG [RS:0;d79ba0c344fb:42437 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:51:19,336 DEBUG [RS:0;d79ba0c344fb:42437 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:51:19,336 DEBUG [RS:0;d79ba0c344fb:42437 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:51:19,336 DEBUG [RS:0;d79ba0c344fb:42437 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/d79ba0c344fb:0, corePoolSize=3, maxPoolSize=3 2024-11-22T18:51:19,336 DEBUG [RS:0;d79ba0c344fb:42437 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/d79ba0c344fb:0, corePoolSize=3, maxPoolSize=3 2024-11-22T18:51:19,337 INFO [RS:0;d79ba0c344fb:42437 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
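Annotation: the block above starts a series of named region-server executor services with fixed core/max pool sizes (RS_OPEN_REGION at 1/1, RS_SNAPSHOT_OPERATIONS and RS_FLUSH_OPERATIONS at 3/3, and so on). As a plain-Java illustration of what those corePoolSize/maxPoolSize parameters mean, here is a generic java.util.concurrent sketch; it is not HBase's own ExecutorService wrapper.

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class PoolSizeSketch {
      public static void main(String[] args) throws InterruptedException {
        // corePoolSize=3, maxPoolSize=3: a fixed-size pool, the same shape the
        // log reports for the 3/3 executor services above.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
            3, 3, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        pool.allowCoreThreadTimeOut(true); // optional: let idle core threads exit

        for (int i = 0; i < 5; i++) {
          final int task = i;
          pool.execute(() -> System.out.println(
              "task " + task + " on " + Thread.currentThread().getName()));
        }
        pool.shutdown();
        pool.awaitTermination(10, TimeUnit.SECONDS);
      }
    }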
2024-11-22T18:51:19,337 INFO [RS:0;d79ba0c344fb:42437 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T18:51:19,337 INFO [RS:0;d79ba0c344fb:42437 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T18:51:19,337 INFO [RS:0;d79ba0c344fb:42437 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-22T18:51:19,337 INFO [RS:0;d79ba0c344fb:42437 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-22T18:51:19,337 INFO [RS:0;d79ba0c344fb:42437 {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,42437,1732301479035-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T18:51:19,357 INFO [RS:0;d79ba0c344fb:42437 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-22T18:51:19,357 INFO [RS:0;d79ba0c344fb:42437 {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,42437,1732301479035-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T18:51:19,357 INFO [RS:0;d79ba0c344fb:42437 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T18:51:19,357 INFO [RS:0;d79ba0c344fb:42437 {}] regionserver.Replication(171): d79ba0c344fb,42437,1732301479035 started 2024-11-22T18:51:19,373 INFO [RS:0;d79ba0c344fb:42437 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T18:51:19,374 INFO [RS:0;d79ba0c344fb:42437 {}] regionserver.HRegionServer(1482): Serving as d79ba0c344fb,42437,1732301479035, RpcServer on d79ba0c344fb/172.17.0.2:42437, sessionid=0x1014104c6c00001 2024-11-22T18:51:19,374 DEBUG [RS:0;d79ba0c344fb:42437 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-22T18:51:19,374 DEBUG [RS:0;d79ba0c344fb:42437 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager d79ba0c344fb,42437,1732301479035 2024-11-22T18:51:19,374 DEBUG [RS:0;d79ba0c344fb:42437 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd79ba0c344fb,42437,1732301479035' 2024-11-22T18:51:19,374 DEBUG [RS:0;d79ba0c344fb:42437 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-22T18:51:19,374 DEBUG [RS:0;d79ba0c344fb:42437 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-22T18:51:19,375 DEBUG [RS:0;d79ba0c344fb:42437 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-22T18:51:19,375 DEBUG [RS:0;d79ba0c344fb:42437 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-22T18:51:19,375 DEBUG [RS:0;d79ba0c344fb:42437 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager d79ba0c344fb,42437,1732301479035 2024-11-22T18:51:19,375 DEBUG [RS:0;d79ba0c344fb:42437 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd79ba0c344fb,42437,1732301479035' 2024-11-22T18:51:19,375 DEBUG [RS:0;d79ba0c344fb:42437 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-22T18:51:19,376 DEBUG 
[RS:0;d79ba0c344fb:42437 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-22T18:51:19,376 DEBUG [RS:0;d79ba0c344fb:42437 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-22T18:51:19,377 INFO [RS:0;d79ba0c344fb:42437 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-22T18:51:19,377 INFO [RS:0;d79ba0c344fb:42437 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-22T18:51:19,476 WARN [d79ba0c344fb:33031 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-22T18:51:19,479 INFO [RS:0;d79ba0c344fb:42437 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d79ba0c344fb%2C42437%2C1732301479035, suffix=, logDir=hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035, archiveDir=hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/oldWALs, maxLogs=32 2024-11-22T18:51:19,481 INFO [RS:0;d79ba0c344fb:42437 {}] monitor.StreamSlowMonitor(122): New stream slow monitor d79ba0c344fb%2C42437%2C1732301479035.1732301479480 2024-11-22T18:51:19,496 INFO [RS:0;d79ba0c344fb:42437 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.1732301479480 2024-11-22T18:51:19,500 DEBUG [RS:0;d79ba0c344fb:42437 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46397:46397),(127.0.0.1/127.0.0.1:37721:37721)] 2024-11-22T18:51:19,726 DEBUG [d79ba0c344fb:33031 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-22T18:51:19,727 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=d79ba0c344fb,42437,1732301479035 2024-11-22T18:51:19,729 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as d79ba0c344fb,42437,1732301479035, state=OPENING 2024-11-22T18:51:19,731 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-22T18:51:19,733 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33031-0x1014104c6c00000, quorum=127.0.0.1:59152, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:51:19,733 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42437-0x1014104c6c00001, quorum=127.0.0.1:59152, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:51:19,733 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T18:51:19,733 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T18:51:19,733 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, 
server=d79ba0c344fb,42437,1732301479035}] 2024-11-22T18:51:19,735 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T18:51:19,874 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T18:51:19,874 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-22T18:51:19,875 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-22T18:51:19,888 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-22T18:51:19,891 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39733, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-22T18:51:19,895 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-22T18:51:19,896 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T18:51:19,898 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d79ba0c344fb%2C42437%2C1732301479035.meta, suffix=.meta, logDir=hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035, archiveDir=hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/oldWALs, maxLogs=32 2024-11-22T18:51:19,899 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta 2024-11-22T18:51:19,905 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta 2024-11-22T18:51:19,951 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37721:37721),(127.0.0.1/127.0.0.1:46397:46397)] 2024-11-22T18:51:19,952 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-22T18:51:19,953 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-22T18:51:19,953 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered 
coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-22T18:51:19,953 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-22T18:51:19,953 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-22T18:51:19,953 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T18:51:19,953 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-22T18:51:19,953 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-22T18:51:19,955 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T18:51:19,956 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T18:51:19,956 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:51:19,957 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:51:19,957 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T18:51:19,958 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy 
for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T18:51:19,958 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:51:19,958 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:51:19,959 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T18:51:19,959 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T18:51:19,959 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:51:19,960 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:51:19,960 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T18:51:19,961 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T18:51:19,961 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:51:19,961 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:51:19,961 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T18:51:19,962 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/hbase/meta/1588230740 2024-11-22T18:51:19,964 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/hbase/meta/1588230740 2024-11-22T18:51:19,965 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T18:51:19,965 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T18:51:19,966 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T18:51:19,968 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T18:51:19,969 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=721613, jitterRate=-0.0824224054813385}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T18:51:19,969 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-22T18:51:19,970 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732301479954Writing region info on filesystem at 1732301479954Initializing all the Stores at 1732301479955 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732301479955Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B 
(8KB)'} at 1732301479955Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732301479955Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732301479955Cleaning up temporary data from old regions at 1732301479965 (+10 ms)Running coprocessor post-open hooks at 1732301479969 (+4 ms)Region opened successfully at 1732301479970 (+1 ms) 2024-11-22T18:51:19,971 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732301479887 2024-11-22T18:51:19,973 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:51:19,975 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-22T18:51:19,975 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-22T18:51:19,976 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=d79ba0c344fb,42437,1732301479035 2024-11-22T18:51:19,978 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as d79ba0c344fb,42437,1732301479035, state=OPEN 2024-11-22T18:51:19,979 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:51:19,986 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42437-0x1014104c6c00001, quorum=127.0.0.1:59152, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T18:51:19,986 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33031-0x1014104c6c00000, quorum=127.0.0.1:59152, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T18:51:19,986 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=d79ba0c344fb,42437,1732301479035 2024-11-22T18:51:19,986 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T18:51:19,986 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T18:51:19,990 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-22T18:51:19,990 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=d79ba0c344fb,42437,1732301479035 in 253 msec 2024-11-22T18:51:19,994 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-22T18:51:19,994 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 668 msec 2024-11-22T18:51:19,995 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T18:51:19,995 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-22T18:51:19,997 DEBUG [PEWorker-1 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T18:51:19,997 DEBUG [PEWorker-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=d79ba0c344fb,42437,1732301479035, seqNum=-1] 2024-11-22T18:51:19,998 DEBUG [PEWorker-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T18:51:20,000 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35597, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T18:51:20,008 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 765 msec 2024-11-22T18:51:20,008 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732301480008, completionTime=-1 2024-11-22T18:51:20,008 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is 
running 2024-11-22T18:51:20,008 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-22T18:51:20,010 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-22T18:51:20,010 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732301540010 2024-11-22T18:51:20,010 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732301600010 2024-11-22T18:51:20,010 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-22T18:51:20,011 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,33031,1732301478977-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T18:51:20,011 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,33031,1732301478977-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T18:51:20,011 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,33031,1732301478977-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T18:51:20,011 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-d79ba0c344fb:33031, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T18:51:20,011 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-22T18:51:20,011 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-22T18:51:20,014 DEBUG [master/d79ba0c344fb:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-22T18:51:20,016 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.934sec 2024-11-22T18:51:20,017 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-22T18:51:20,017 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-22T18:51:20,017 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-22T18:51:20,017 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-22T18:51:20,017 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-22T18:51:20,017 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,33031,1732301478977-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T18:51:20,017 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,33031,1732301478977-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-22T18:51:20,020 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-22T18:51:20,020 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-22T18:51:20,020 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,33031,1732301478977-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T18:51:20,062 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e8113bb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T18:51:20,063 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request d79ba0c344fb,33031,-1 for getting cluster id 2024-11-22T18:51:20,063 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T18:51:20,066 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c3902c7-3f45-4562-bf97-ab3938d612f8' 2024-11-22T18:51:20,067 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T18:51:20,067 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c3902c7-3f45-4562-bf97-ab3938d612f8" 2024-11-22T18:51:20,067 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6321df1d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T18:51:20,067 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [d79ba0c344fb,33031,-1] 2024-11-22T18:51:20,068 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T18:51:20,068 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T18:51:20,070 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54050, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T18:51:20,071 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10c7ae56, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T18:51:20,071 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T18:51:20,072 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=d79ba0c344fb,42437,1732301479035, seqNum=-1] 2024-11-22T18:51:20,073 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T18:51:20,079 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46974, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T18:51:20,082 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=d79ba0c344fb,33031,1732301478977 2024-11-22T18:51:20,082 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T18:51:20,086 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-22T18:51:20,112 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/d79ba0c344fb:0 server-side Connection retries=45 2024-11-22T18:51:20,112 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T18:51:20,112 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T18:51:20,112 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T18:51:20,112 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T18:51:20,112 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T18:51:20,112 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-22T18:51:20,116 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T18:51:20,117 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35983 2024-11-22T18:51:20,119 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35983 connecting to ZooKeeper ensemble=127.0.0.1:59152 2024-11-22T18:51:20,120 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T18:51:20,123 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T18:51:20,130 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:359830x0, quorum=127.0.0.1:59152, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T18:51:20,130 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:359830x0, quorum=127.0.0.1:59152, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-22T18:51:20,130 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-22T18:51:20,131 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35983-0x1014104c6c00002 connected 2024-11-22T18:51:20,132 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-22T18:51:20,136 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-22T18:51:20,137 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:35983-0x1014104c6c00002, quorum=127.0.0.1:59152, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-22T18:51:20,140 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35983-0x1014104c6c00002, quorum=127.0.0.1:59152, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T18:51:20,147 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35983 2024-11-22T18:51:20,148 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35983 2024-11-22T18:51:20,149 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35983 2024-11-22T18:51:20,152 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35983 2024-11-22T18:51:20,152 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35983 2024-11-22T18:51:20,154 INFO [RS:1;d79ba0c344fb:35983 {}] regionserver.HRegionServer(746): ClusterId : 7c3902c7-3f45-4562-bf97-ab3938d612f8 2024-11-22T18:51:20,154 DEBUG [RS:1;d79ba0c344fb:35983 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-22T18:51:20,163 DEBUG [RS:1;d79ba0c344fb:35983 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-22T18:51:20,163 DEBUG [RS:1;d79ba0c344fb:35983 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-22T18:51:20,167 DEBUG [RS:1;d79ba0c344fb:35983 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-22T18:51:20,168 DEBUG [RS:1;d79ba0c344fb:35983 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@221f891a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d79ba0c344fb/172.17.0.2:0 2024-11-22T18:51:20,187 DEBUG [RS:1;d79ba0c344fb:35983 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: 
Shutdownhook:RS:1;d79ba0c344fb:35983 2024-11-22T18:51:20,187 INFO [RS:1;d79ba0c344fb:35983 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-22T18:51:20,187 INFO [RS:1;d79ba0c344fb:35983 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-22T18:51:20,187 DEBUG [RS:1;d79ba0c344fb:35983 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-22T18:51:20,189 INFO [RS:1;d79ba0c344fb:35983 {}] regionserver.HRegionServer(2659): reportForDuty to master=d79ba0c344fb,33031,1732301478977 with port=35983, startcode=1732301480111 2024-11-22T18:51:20,189 DEBUG [RS:1;d79ba0c344fb:35983 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-22T18:51:20,191 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32961, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-22T18:51:20,192 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33031 {}] master.ServerManager(363): Checking decommissioned status of RegionServer d79ba0c344fb,35983,1732301480111 2024-11-22T18:51:20,192 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33031 {}] master.ServerManager(517): Registering regionserver=d79ba0c344fb,35983,1732301480111 2024-11-22T18:51:20,194 DEBUG [RS:1;d79ba0c344fb:35983 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e 2024-11-22T18:51:20,194 DEBUG [RS:1;d79ba0c344fb:35983 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44035 2024-11-22T18:51:20,194 DEBUG [RS:1;d79ba0c344fb:35983 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-22T18:51:20,201 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33031-0x1014104c6c00000, quorum=127.0.0.1:59152, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T18:51:20,202 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [d79ba0c344fb,35983,1732301480111] 2024-11-22T18:51:20,203 DEBUG [RS:1;d79ba0c344fb:35983 {}] zookeeper.ZKUtil(111): regionserver:35983-0x1014104c6c00002, quorum=127.0.0.1:59152, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/d79ba0c344fb,35983,1732301480111 2024-11-22T18:51:20,203 WARN [RS:1;d79ba0c344fb:35983 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-22T18:51:20,203 INFO [RS:1;d79ba0c344fb:35983 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T18:51:20,203 DEBUG [RS:1;d79ba0c344fb:35983 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111 2024-11-22T18:51:20,215 INFO [RS:1;d79ba0c344fb:35983 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-22T18:51:20,221 INFO [RS:1;d79ba0c344fb:35983 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-22T18:51:20,224 INFO [RS:1;d79ba0c344fb:35983 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-22T18:51:20,225 INFO [RS:1;d79ba0c344fb:35983 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T18:51:20,225 INFO [RS:1;d79ba0c344fb:35983 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-22T18:51:20,226 INFO [RS:1;d79ba0c344fb:35983 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-22T18:51:20,226 INFO [RS:1;d79ba0c344fb:35983 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-22T18:51:20,226 DEBUG [RS:1;d79ba0c344fb:35983 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:51:20,226 DEBUG [RS:1;d79ba0c344fb:35983 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:51:20,226 DEBUG [RS:1;d79ba0c344fb:35983 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:51:20,227 DEBUG [RS:1;d79ba0c344fb:35983 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:51:20,227 DEBUG [RS:1;d79ba0c344fb:35983 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:51:20,227 DEBUG [RS:1;d79ba0c344fb:35983 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/d79ba0c344fb:0, corePoolSize=2, maxPoolSize=2 2024-11-22T18:51:20,227 DEBUG [RS:1;d79ba0c344fb:35983 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:51:20,227 DEBUG [RS:1;d79ba0c344fb:35983 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:51:20,227 DEBUG [RS:1;d79ba0c344fb:35983 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:51:20,227 DEBUG [RS:1;d79ba0c344fb:35983 {}] executor.ExecutorService(95): Starting executor service 
name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:51:20,227 DEBUG [RS:1;d79ba0c344fb:35983 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:51:20,227 DEBUG [RS:1;d79ba0c344fb:35983 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:51:20,227 DEBUG [RS:1;d79ba0c344fb:35983 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/d79ba0c344fb:0, corePoolSize=3, maxPoolSize=3 2024-11-22T18:51:20,227 DEBUG [RS:1;d79ba0c344fb:35983 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/d79ba0c344fb:0, corePoolSize=3, maxPoolSize=3 2024-11-22T18:51:20,236 INFO [RS:1;d79ba0c344fb:35983 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T18:51:20,236 INFO [RS:1;d79ba0c344fb:35983 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T18:51:20,236 INFO [RS:1;d79ba0c344fb:35983 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T18:51:20,236 INFO [RS:1;d79ba0c344fb:35983 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-22T18:51:20,236 INFO [RS:1;d79ba0c344fb:35983 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-22T18:51:20,236 INFO [RS:1;d79ba0c344fb:35983 {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,35983,1732301480111-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T18:51:20,261 INFO [RS:1;d79ba0c344fb:35983 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-22T18:51:20,261 INFO [RS:1;d79ba0c344fb:35983 {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,35983,1732301480111-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T18:51:20,261 INFO [RS:1;d79ba0c344fb:35983 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T18:51:20,261 INFO [RS:1;d79ba0c344fb:35983 {}] regionserver.Replication(171): d79ba0c344fb,35983,1732301480111 started 2024-11-22T18:51:20,283 INFO [RS:1;d79ba0c344fb:35983 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-22T18:51:20,284 INFO [RS:1;d79ba0c344fb:35983 {}] regionserver.HRegionServer(1482): Serving as d79ba0c344fb,35983,1732301480111, RpcServer on d79ba0c344fb/172.17.0.2:35983, sessionid=0x1014104c6c00002 2024-11-22T18:51:20,284 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;d79ba0c344fb:35983,5,FailOnTimeoutGroup] 2024-11-22T18:51:20,284 DEBUG [RS:1;d79ba0c344fb:35983 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-22T18:51:20,284 DEBUG [RS:1;d79ba0c344fb:35983 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager d79ba0c344fb,35983,1732301480111 2024-11-22T18:51:20,284 DEBUG [RS:1;d79ba0c344fb:35983 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd79ba0c344fb,35983,1732301480111' 2024-11-22T18:51:20,284 DEBUG [RS:1;d79ba0c344fb:35983 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-22T18:51:20,284 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-22T18:51:20,285 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-22T18:51:20,285 DEBUG [RS:1;d79ba0c344fb:35983 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-22T18:51:20,285 DEBUG [RS:1;d79ba0c344fb:35983 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-22T18:51:20,285 DEBUG [RS:1;d79ba0c344fb:35983 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-22T18:51:20,285 DEBUG [RS:1;d79ba0c344fb:35983 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager d79ba0c344fb,35983,1732301480111 2024-11-22T18:51:20,285 DEBUG [RS:1;d79ba0c344fb:35983 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd79ba0c344fb,35983,1732301480111' 2024-11-22T18:51:20,285 DEBUG [RS:1;d79ba0c344fb:35983 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-22T18:51:20,286 DEBUG [RS:1;d79ba0c344fb:35983 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-22T18:51:20,286 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is d79ba0c344fb,33031,1732301478977 2024-11-22T18:51:20,286 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@1fa8201a 2024-11-22T18:51:20,286 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-22T18:51:20,286 DEBUG [RS:1;d79ba0c344fb:35983 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-22T18:51:20,286 INFO [RS:1;d79ba0c344fb:35983 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-22T18:51:20,287 INFO [RS:1;d79ba0c344fb:35983 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-22T18:51:20,289 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54060, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-22T18:51:20,289 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33031 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-22T18:51:20,289 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33031 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-22T18:51:20,290 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33031 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T18:51:20,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33031 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-22T18:51:20,293 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-22T18:51:20,293 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:51:20,293 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33031 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-22T18:51:20,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33031 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-22T18:51:20,295 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-22T18:51:20,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33287 is added to blk_1073741835_1011 (size=393) 2024-11-22T18:51:20,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40677 is added to blk_1073741835_1011 (size=393) 2024-11-22T18:51:20,309 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => e10faa3d3c31514930ebfe5adc82aa17, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1732301480289.e10faa3d3c31514930ebfe5adc82aa17.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e 2024-11-22T18:51:20,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40677 is added to blk_1073741836_1012 (size=76) 2024-11-22T18:51:20,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33287 is added to blk_1073741836_1012 (size=76) 2024-11-22T18:51:20,320 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1732301480289.e10faa3d3c31514930ebfe5adc82aa17.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T18:51:20,320 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing e10faa3d3c31514930ebfe5adc82aa17, disabling compactions & flushes 2024-11-22T18:51:20,320 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1732301480289.e10faa3d3c31514930ebfe5adc82aa17. 2024-11-22T18:51:20,320 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732301480289.e10faa3d3c31514930ebfe5adc82aa17. 2024-11-22T18:51:20,320 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732301480289.e10faa3d3c31514930ebfe5adc82aa17. after waiting 0 ms 2024-11-22T18:51:20,320 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1732301480289.e10faa3d3c31514930ebfe5adc82aa17. 2024-11-22T18:51:20,321 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732301480289.e10faa3d3c31514930ebfe5adc82aa17. 
2024-11-22T18:51:20,321 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for e10faa3d3c31514930ebfe5adc82aa17: Waiting for close lock at 1732301480320Disabling compacts and flushes for region at 1732301480320Disabling writes for close at 1732301480320Writing region close event to WAL at 1732301480320Closed at 1732301480320 2024-11-22T18:51:20,325 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-22T18:51:20,327 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1732301480289.e10faa3d3c31514930ebfe5adc82aa17.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1732301480326"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732301480326"}]},"ts":"1732301480326"} 2024-11-22T18:51:20,330 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-22T18:51:20,332 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-22T18:51:20,332 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732301480332"}]},"ts":"1732301480332"} 2024-11-22T18:51:20,335 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-22T18:51:20,336 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=e10faa3d3c31514930ebfe5adc82aa17, ASSIGN}] 2024-11-22T18:51:20,338 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=e10faa3d3c31514930ebfe5adc82aa17, ASSIGN 2024-11-22T18:51:20,340 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=e10faa3d3c31514930ebfe5adc82aa17, ASSIGN; state=OFFLINE, location=d79ba0c344fb,42437,1732301479035; forceNewPlan=false, retain=false 2024-11-22T18:51:20,389 INFO [RS:1;d79ba0c344fb:35983 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d79ba0c344fb%2C35983%2C1732301480111, suffix=, logDir=hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111, archiveDir=hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/oldWALs, maxLogs=32 2024-11-22T18:51:20,390 INFO [RS:1;d79ba0c344fb:35983 {}] monitor.StreamSlowMonitor(122): New stream slow monitor d79ba0c344fb%2C35983%2C1732301480111.1732301480390 2024-11-22T18:51:20,401 INFO [RS:1;d79ba0c344fb:35983 {}] wal.AbstractFSWAL(991): New 
WAL /user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 2024-11-22T18:51:20,408 DEBUG [RS:1;d79ba0c344fb:35983 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37721:37721),(127.0.0.1/127.0.0.1:46397:46397)] 2024-11-22T18:51:20,491 INFO [d79ba0c344fb:33031 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-22T18:51:20,491 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=e10faa3d3c31514930ebfe5adc82aa17, regionState=OPENING, regionLocation=d79ba0c344fb,42437,1732301479035 2024-11-22T18:51:20,495 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=e10faa3d3c31514930ebfe5adc82aa17, ASSIGN because future has completed 2024-11-22T18:51:20,496 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure e10faa3d3c31514930ebfe5adc82aa17, server=d79ba0c344fb,42437,1732301479035}] 2024-11-22T18:51:20,506 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-22T18:51:20,508 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:51:20,535 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:51:20,538 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:51:20,539 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:51:20,655 INFO [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1732301480289.e10faa3d3c31514930ebfe5adc82aa17. 
2024-11-22T18:51:20,656 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => e10faa3d3c31514930ebfe5adc82aa17, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1732301480289.e10faa3d3c31514930ebfe5adc82aa17.', STARTKEY => '', ENDKEY => ''} 2024-11-22T18:51:20,656 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath e10faa3d3c31514930ebfe5adc82aa17 2024-11-22T18:51:20,657 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1732301480289.e10faa3d3c31514930ebfe5adc82aa17.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T18:51:20,657 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for e10faa3d3c31514930ebfe5adc82aa17 2024-11-22T18:51:20,657 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for e10faa3d3c31514930ebfe5adc82aa17 2024-11-22T18:51:20,659 INFO [StoreOpener-e10faa3d3c31514930ebfe5adc82aa17-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region e10faa3d3c31514930ebfe5adc82aa17 2024-11-22T18:51:20,661 INFO [StoreOpener-e10faa3d3c31514930ebfe5adc82aa17-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e10faa3d3c31514930ebfe5adc82aa17 columnFamilyName info 2024-11-22T18:51:20,661 DEBUG [StoreOpener-e10faa3d3c31514930ebfe5adc82aa17-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:51:20,661 INFO [StoreOpener-e10faa3d3c31514930ebfe5adc82aa17-1 {}] regionserver.HStore(327): Store=e10faa3d3c31514930ebfe5adc82aa17/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T18:51:20,662 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for e10faa3d3c31514930ebfe5adc82aa17 2024-11-22T18:51:20,662 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17 2024-11-22T18:51:20,663 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17 2024-11-22T18:51:20,664 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for e10faa3d3c31514930ebfe5adc82aa17 2024-11-22T18:51:20,664 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for e10faa3d3c31514930ebfe5adc82aa17 2024-11-22T18:51:20,666 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for e10faa3d3c31514930ebfe5adc82aa17 2024-11-22T18:51:20,669 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T18:51:20,669 INFO [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened e10faa3d3c31514930ebfe5adc82aa17; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=773324, jitterRate=-0.016667842864990234}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T18:51:20,670 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e10faa3d3c31514930ebfe5adc82aa17 2024-11-22T18:51:20,670 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for e10faa3d3c31514930ebfe5adc82aa17: Running coprocessor pre-open hook at 1732301480657Writing region info on filesystem at 1732301480657Initializing all the Stores at 1732301480658 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732301480658Cleaning up temporary data from old regions at 1732301480664 (+6 ms)Running coprocessor post-open hooks at 1732301480670 (+6 ms)Region opened successfully at 1732301480670 2024-11-22T18:51:20,672 INFO [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1732301480289.e10faa3d3c31514930ebfe5adc82aa17., pid=6, masterSystemTime=1732301480649 2024-11-22T18:51:20,675 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnDatanodeDeath,,1732301480289.e10faa3d3c31514930ebfe5adc82aa17. 2024-11-22T18:51:20,675 INFO [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1732301480289.e10faa3d3c31514930ebfe5adc82aa17. 2024-11-22T18:51:20,677 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=e10faa3d3c31514930ebfe5adc82aa17, regionState=OPEN, openSeqNum=2, regionLocation=d79ba0c344fb,42437,1732301479035 2024-11-22T18:51:20,680 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure e10faa3d3c31514930ebfe5adc82aa17, server=d79ba0c344fb,42437,1732301479035 because future has completed 2024-11-22T18:51:20,686 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-22T18:51:20,687 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure e10faa3d3c31514930ebfe5adc82aa17, server=d79ba0c344fb,42437,1732301479035 in 187 msec 2024-11-22T18:51:20,690 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-22T18:51:20,690 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=e10faa3d3c31514930ebfe5adc82aa17, ASSIGN in 350 msec 2024-11-22T18:51:20,691 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-22T18:51:20,692 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732301480691"}]},"ts":"1732301480691"} 2024-11-22T18:51:20,694 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-22T18:51:20,695 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-22T18:51:20,698 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 405 msec 2024-11-22T18:51:25,378 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-22T18:51:25,379 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:51:25,394 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:51:25,396 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:51:25,396 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:51:25,403 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-22T18:51:29,873 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-22T18:51:29,873 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-22T18:51:29,874 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-22T18:51:29,874 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-22T18:51:29,874 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T18:51:29,874 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-22T18:51:30,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33031 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-22T18:51:30,368 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-22T18:51:30,368 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-22T18:51:30,371 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-22T18:51:30,371 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1732301480289.e10faa3d3c31514930ebfe5adc82aa17. 2024-11-22T18:51:30,385 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T18:51:30,388 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T18:51:30,389 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T18:51:30,389 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T18:51:30,389 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T18:51:30,390 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6d0f4a9a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/hadoop.log.dir/,AVAILABLE} 2024-11-22T18:51:30,390 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ff5703b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T18:51:30,506 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2047cbbb{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/java.io.tmpdir/jetty-localhost-38723-hadoop-hdfs-3_4_1-tests_jar-_-any-3690129176119907418/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T18:51:30,507 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2021586{HTTP/1.1, (http/1.1)}{localhost:38723} 2024-11-22T18:51:30,507 INFO [Time-limited test {}] server.Server(415): Started @115896ms 2024-11-22T18:51:30,508 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T18:51:30,542 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T18:51:30,545 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T18:51:30,546 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T18:51:30,546 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T18:51:30,546 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T18:51:30,546 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@519de6b2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/hadoop.log.dir/,AVAILABLE} 2024-11-22T18:51:30,546 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@167a7fde{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T18:51:30,611 WARN [Thread-829 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/data/data5/current/BP-618561351-172.17.0.2-1732301478132/current, will proceed with Du for space computation calculation, 2024-11-22T18:51:30,611 WARN [Thread-830 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/data/data6/current/BP-618561351-172.17.0.2-1732301478132/current, will proceed with Du for space computation calculation, 2024-11-22T18:51:30,634 WARN [Thread-809 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T18:51:30,636 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3bc459a265d19ddb with lease ID 0x8d42bec30390705e: Processing first storage report for DS-8585a096-2fa1-4304-9e66-228a52d53d0d from datanode DatanodeRegistration(127.0.0.1:33799, datanodeUuid=9bd3246e-f722-407b-b012-5ddb9136aa7e, infoPort=33211, infoSecurePort=0, ipcPort=34195, storageInfo=lv=-57;cid=testClusterID;nsid=1373837700;c=1732301478132) 2024-11-22T18:51:30,636 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3bc459a265d19ddb with lease ID 0x8d42bec30390705e: from storage DS-8585a096-2fa1-4304-9e66-228a52d53d0d node DatanodeRegistration(127.0.0.1:33799, datanodeUuid=9bd3246e-f722-407b-b012-5ddb9136aa7e, infoPort=33211, infoSecurePort=0, ipcPort=34195, storageInfo=lv=-57;cid=testClusterID;nsid=1373837700;c=1732301478132), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T18:51:30,636 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3bc459a265d19ddb with lease ID 0x8d42bec30390705e: Processing first storage report for DS-48a79a5a-623c-4706-b1f9-d45951074696 from datanode DatanodeRegistration(127.0.0.1:33799, datanodeUuid=9bd3246e-f722-407b-b012-5ddb9136aa7e, infoPort=33211, infoSecurePort=0, ipcPort=34195, storageInfo=lv=-57;cid=testClusterID;nsid=1373837700;c=1732301478132) 2024-11-22T18:51:30,636 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3bc459a265d19ddb with lease ID 0x8d42bec30390705e: from storage DS-48a79a5a-623c-4706-b1f9-d45951074696 node DatanodeRegistration(127.0.0.1:33799, datanodeUuid=9bd3246e-f722-407b-b012-5ddb9136aa7e, infoPort=33211, infoSecurePort=0, ipcPort=34195, storageInfo=lv=-57;cid=testClusterID;nsid=1373837700;c=1732301478132), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T18:51:30,670 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5b5be5aa{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/java.io.tmpdir/jetty-localhost-37399-hadoop-hdfs-3_4_1-tests_jar-_-any-4952022849847632350/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T18:51:30,670 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7f7383da{HTTP/1.1, (http/1.1)}{localhost:37399} 2024-11-22T18:51:30,671 INFO [Time-limited test {}] server.Server(415): Started @116060ms 2024-11-22T18:51:30,672 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T18:51:30,707 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T18:51:30,710 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T18:51:30,712 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T18:51:30,712 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T18:51:30,712 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T18:51:30,713 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@50ff2063{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/hadoop.log.dir/,AVAILABLE} 2024-11-22T18:51:30,713 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1dc59954{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T18:51:30,777 WARN [Thread-865 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/data/data8/current/BP-618561351-172.17.0.2-1732301478132/current, will proceed with Du for space computation calculation, 2024-11-22T18:51:30,777 WARN [Thread-864 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/data/data7/current/BP-618561351-172.17.0.2-1732301478132/current, will proceed with Du for space computation calculation, 2024-11-22T18:51:30,815 WARN [Thread-844 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T18:51:30,817 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4ce53ddd7d84ea76 with lease ID 0x8d42bec30390705f: Processing first storage report for DS-981ab2ca-7430-42e8-9913-2bb76ba87bc8 from datanode DatanodeRegistration(127.0.0.1:46699, datanodeUuid=be31cbf1-fe93-4a70-ac33-1973209f892f, infoPort=35069, infoSecurePort=0, ipcPort=33101, storageInfo=lv=-57;cid=testClusterID;nsid=1373837700;c=1732301478132) 2024-11-22T18:51:30,818 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4ce53ddd7d84ea76 with lease ID 0x8d42bec30390705f: from storage DS-981ab2ca-7430-42e8-9913-2bb76ba87bc8 node DatanodeRegistration(127.0.0.1:46699, datanodeUuid=be31cbf1-fe93-4a70-ac33-1973209f892f, infoPort=35069, infoSecurePort=0, ipcPort=33101, storageInfo=lv=-57;cid=testClusterID;nsid=1373837700;c=1732301478132), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T18:51:30,818 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4ce53ddd7d84ea76 with lease ID 0x8d42bec30390705f: Processing first storage report for DS-f18ec120-e35c-4c1a-978c-f6203944e9ca from datanode DatanodeRegistration(127.0.0.1:46699, datanodeUuid=be31cbf1-fe93-4a70-ac33-1973209f892f, infoPort=35069, infoSecurePort=0, ipcPort=33101, storageInfo=lv=-57;cid=testClusterID;nsid=1373837700;c=1732301478132) 2024-11-22T18:51:30,818 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4ce53ddd7d84ea76 with lease ID 0x8d42bec30390705f: from storage DS-f18ec120-e35c-4c1a-978c-f6203944e9ca node DatanodeRegistration(127.0.0.1:46699, datanodeUuid=be31cbf1-fe93-4a70-ac33-1973209f892f, infoPort=35069, infoSecurePort=0, ipcPort=33101, storageInfo=lv=-57;cid=testClusterID;nsid=1373837700;c=1732301478132), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T18:51:30,855 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@30add41a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/java.io.tmpdir/jetty-localhost-38577-hadoop-hdfs-3_4_1-tests_jar-_-any-18056486209247974392/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T18:51:30,856 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3fd17220{HTTP/1.1, (http/1.1)}{localhost:38577} 2024-11-22T18:51:30,856 INFO [Time-limited test {}] server.Server(415): Started @116245ms 2024-11-22T18:51:30,857 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
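The repeated DirectoryScanner warnings above ("dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1") mean the test configuration carries an out-of-range throttle value, so the datanode falls back to -1 and disables throttling. Below is a minimal sketch, assuming a plain Hadoop test Configuration, of setting that property to a value the scanner accepts; the concrete value 500 is illustrative and not taken from this run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class DirectoryScannerThrottleSketch {
        public static Configuration withScanThrottle() {
            // HdfsConfiguration layers hdfs-default.xml / hdfs-site.xml over the core defaults.
            Configuration conf = new HdfsConfiguration();
            // Allow the DataNode DirectoryScanner roughly 500 ms of scanning work per second.
            // Values above 1000 produce the warning seen in this log and throttling is turned off (-1).
            conf.setInt("dfs.datanode.directoryscan.throttle.limit.ms.per.sec", 500);
            return conf;
        }
    }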
2024-11-22T18:51:30,976 WARN [Thread-891 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/data/data10/current/BP-618561351-172.17.0.2-1732301478132/current, will proceed with Du for space computation calculation, 2024-11-22T18:51:30,976 WARN [Thread-890 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/data/data9/current/BP-618561351-172.17.0.2-1732301478132/current, will proceed with Du for space computation calculation, 2024-11-22T18:51:31,002 WARN [Thread-879 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-22T18:51:31,005 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb12a4c865b3bd5ed with lease ID 0x8d42bec303907060: Processing first storage report for DS-66ee2b9c-500a-4ee8-b156-fc240374cf40 from datanode DatanodeRegistration(127.0.0.1:40553, datanodeUuid=21bac189-52c3-492e-8931-114bf278e511, infoPort=45083, infoSecurePort=0, ipcPort=44325, storageInfo=lv=-57;cid=testClusterID;nsid=1373837700;c=1732301478132) 2024-11-22T18:51:31,005 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb12a4c865b3bd5ed with lease ID 0x8d42bec303907060: from storage DS-66ee2b9c-500a-4ee8-b156-fc240374cf40 node DatanodeRegistration(127.0.0.1:40553, datanodeUuid=21bac189-52c3-492e-8931-114bf278e511, infoPort=45083, infoSecurePort=0, ipcPort=44325, storageInfo=lv=-57;cid=testClusterID;nsid=1373837700;c=1732301478132), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T18:51:31,005 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb12a4c865b3bd5ed with lease ID 0x8d42bec303907060: Processing first storage report for DS-dbbe6d07-86fc-4e6a-86f6-d20052abf9b8 from datanode DatanodeRegistration(127.0.0.1:40553, datanodeUuid=21bac189-52c3-492e-8931-114bf278e511, infoPort=45083, infoSecurePort=0, ipcPort=44325, storageInfo=lv=-57;cid=testClusterID;nsid=1373837700;c=1732301478132) 2024-11-22T18:51:31,005 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb12a4c865b3bd5ed with lease ID 0x8d42bec303907060: from storage DS-dbbe6d07-86fc-4e6a-86f6-d20052abf9b8 node DatanodeRegistration(127.0.0.1:40553, datanodeUuid=21bac189-52c3-492e-8931-114bf278e511, infoPort=45083, infoSecurePort=0, ipcPort=44325, storageInfo=lv=-57;cid=testClusterID;nsid=1373837700;c=1732301478132), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T18:51:31,079 WARN [ResponseProcessor for block BP-618561351-172.17.0.2-1732301478132:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-618561351-172.17.0.2-1732301478132:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:31,079 WARN [ResponseProcessor for block BP-618561351-172.17.0.2-1732301478132:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-618561351-172.17.0.2-1732301478132:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:31,080 WARN [ResponseProcessor for block BP-618561351-172.17.0.2-1732301478132:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-618561351-172.17.0.2-1732301478132:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-618561351-172.17.0.2-1732301478132:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:33287,DS-836af7c1-fa42-4527-b4b7-c2fbb82a8f52,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:31,080 WARN [DataStreamer for file /user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.1732301479480 block BP-618561351-172.17.0.2-1732301478132:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-618561351-172.17.0.2-1732301478132:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33287,DS-836af7c1-fa42-4527-b4b7-c2fbb82a8f52,DISK], DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33287,DS-836af7c1-fa42-4527-b4b7-c2fbb82a8f52,DISK]) is bad. 2024-11-22T18:51:31,080 WARN [DataStreamer for file /user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta block BP-618561351-172.17.0.2-1732301478132:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-618561351-172.17.0.2-1732301478132:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK], DatanodeInfoWithStorage[127.0.0.1:33287,DS-836af7c1-fa42-4527-b4b7-c2fbb82a8f52,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33287,DS-836af7c1-fa42-4527-b4b7-c2fbb82a8f52,DISK]) is bad. 2024-11-22T18:51:31,080 WARN [PacketResponder: BP-618561351-172.17.0.2-1732301478132:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:33287] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:51:31,081 WARN [DataStreamer for file /user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/MasterData/WALs/d79ba0c344fb,33031,1732301478977/d79ba0c344fb%2C33031%2C1732301478977.1732301479152 block BP-618561351-172.17.0.2-1732301478132:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-618561351-172.17.0.2-1732301478132:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33287,DS-836af7c1-fa42-4527-b4b7-c2fbb82a8f52,DISK], DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33287,DS-836af7c1-fa42-4527-b4b7-c2fbb82a8f52,DISK]) is bad. 2024-11-22T18:51:31,081 WARN [ResponseProcessor for block BP-618561351-172.17.0.2-1732301478132:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-618561351-172.17.0.2-1732301478132:blk_1073741837_1013 java.io.IOException: Bad response ERROR for BP-618561351-172.17.0.2-1732301478132:blk_1073741837_1013 from datanode DatanodeInfoWithStorage[127.0.0.1:33287,DS-836af7c1-fa42-4527-b4b7-c2fbb82a8f52,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:31,081 WARN [DataStreamer for file /user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 block BP-618561351-172.17.0.2-1732301478132:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-618561351-172.17.0.2-1732301478132:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK], DatanodeInfoWithStorage[127.0.0.1:33287,DS-836af7c1-fa42-4527-b4b7-c2fbb82a8f52,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33287,DS-836af7c1-fa42-4527-b4b7-c2fbb82a8f52,DISK]) is bad. 2024-11-22T18:51:31,082 WARN [PacketResponder: BP-618561351-172.17.0.2-1732301478132:blk_1073741837_1013, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:33287] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:51:31,082 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-463490505_22 at /127.0.0.1:43612 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:33287:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43612 dst: /127.0.0.1:33287 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:51:31,084 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-463490505_22 at /127.0.0.1:36640 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:40677:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36640 dst: /127.0.0.1:40677 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:51:31,084 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2014935513_22 at /127.0.0.1:36666 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:40677:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36666 dst: /127.0.0.1:40677 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
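The "Error Recovery for ... datanode ... is bad" warnings and the DataXceiver EOF/connection-reset stacks above are the HDFS write pipeline reacting to a datanode vanishing underneath the open WAL files. How the client responds, whether it tries to swap a replacement datanode into the pipeline or keeps writing with the survivors, is controlled by the dfs.client.block.write.replace-datanode-on-failure.* settings. A minimal sketch, assuming a standalone HDFS client pointed at the localhost:44035 namenode from this log; the specific values are illustrative, not the ones this test used.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    public class PipelineRecoverySketch {
        public static FileSystem openFs() throws Exception {
            Configuration conf = new Configuration();
            // Try to replace a failed datanode in an open write pipeline...
            conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
            // ...using the standard DEFAULT heuristic...
            conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
            // ...and, if no replacement is available, keep writing with the remaining nodes
            // rather than failing with "All datanodes ... are bad. Aborting..." as later in this log.
            conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
            return FileSystem.get(URI.create("hdfs://localhost:44035/"), conf);
        }
    }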
2024-11-22T18:51:31,084 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1190636397_22 at /127.0.0.1:43584 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:33287:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43584 dst: /127.0.0.1:33287 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:51:31,082 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-463490505_22 at /127.0.0.1:36628 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:40677:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36628 dst: /127.0.0.1:40677 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:51:31,084 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2014935513_22 at /127.0.0.1:43658 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:33287:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43658 dst: /127.0.0.1:33287 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
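When a pipeline member is reported bad, as in the WRITE_BLOCK errors above against 127.0.0.1:33287 and 127.0.0.1:40677, it can help to check which datanodes actually hold replicas of the affected file. A small sketch using the public FileSystem API; the path argument is a placeholder for a WAL file such as the ones named in these messages.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.BlockLocation;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class BlockLocationProbe {
        public static void printReplicaHosts(String file) throws Exception {
            FileSystem fs = FileSystem.get(new Configuration());
            FileStatus st = fs.getFileStatus(new Path(file)); // e.g. a WAL under .../WALs/ in this log
            // One BlockLocation per block, listing the datanodes that hold a replica of it.
            for (BlockLocation loc : fs.getFileBlockLocations(st, 0, st.getLen())) {
                System.out.println(loc.getOffset() + " -> " + String.join(",", loc.getNames()));
            }
        }
    }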
2024-11-22T18:51:31,084 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-463490505_22 at /127.0.0.1:43626 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:33287:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43626 dst: /127.0.0.1:33287 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:51:31,084 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1190636397_22 at /127.0.0.1:36604 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:40677:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36604 dst: /127.0.0.1:40677 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:51:31,086 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7330fb3f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T18:51:31,086 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@20aa2ea7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T18:51:31,087 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T18:51:31,087 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1ec1a06e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T18:51:31,087 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7eee535{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/hadoop.log.dir/,STOPPED} 2024-11-22T18:51:31,089 WARN [BP-618561351-172.17.0.2-1732301478132 heartbeating to localhost/127.0.0.1:44035 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T18:51:31,089 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
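The interrupted IncrementalBlockReportManager and command-processor messages above are a datanode being shut down while the cluster is live; that is the point of testLogRollOnDatanodeDeath, which kills datanodes under the active WAL to force the pipeline failures that follow. A rough sketch of that pattern under the mini-cluster test harness; the calls shown (getDFSCluster, stopDataNode) are the usual mini-cluster hooks, but the exact steps the real test takes are not visible in this log, so treat this as an assumption about how such a test is driven.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class DatanodeDeathSketch {
        // Stop one datanode of the test's mini DFS cluster so that appends to the
        // open WAL blocks start failing, as in the pipeline errors in this log.
        public static void killOneDatanode(HBaseTestingUtil util) throws Exception {
            MiniDFSCluster dfs = util.getDFSCluster();
            // stopDataNode(int) shuts the node down and returns its properties,
            // which a test can keep if it wants to restart the same node later.
            MiniDFSCluster.DataNodeProperties stopped = dfs.stopDataNode(0);
            System.out.println("stopped datanode: " + (stopped != null));
        }
    }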
2024-11-22T18:51:31,089 WARN [BP-618561351-172.17.0.2-1732301478132 heartbeating to localhost/127.0.0.1:44035 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-618561351-172.17.0.2-1732301478132 (Datanode Uuid cac19669-cbaf-42c7-9aa6-abad3a8bfb0e) service to localhost/127.0.0.1:44035 2024-11-22T18:51:31,089 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T18:51:31,090 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/data/data3/current/BP-618561351-172.17.0.2-1732301478132 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T18:51:31,090 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/data/data4/current/BP-618561351-172.17.0.2-1732301478132 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T18:51:31,090 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T18:51:31,090 WARN [DataStreamer for file /user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/MasterData/WALs/d79ba0c344fb,33031,1732301478977/d79ba0c344fb%2C33031%2C1732301478977.1732301479152 block BP-618561351-172.17.0.2-1732301478132:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T18:51:31,090 WARN [DataStreamer for file /user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 block BP-618561351-172.17.0.2-1732301478132:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:31,090 WARN [DataStreamer for file /user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta block BP-618561351-172.17.0.2-1732301478132:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:31,092 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@71fa2b9f {}] datanode.DataXceiver(331): 127.0.0.1:40677:DataXceiver error processing unknown operation src: /127.0.0.1:39588 dst: /127.0.0.1:40677 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:51:31,093 WARN [DataStreamer for file /user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.1732301479480 block BP-618561351-172.17.0.2-1732301478132:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T18:51:31,095 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@14d09ab9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T18:51:31,095 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@ee6b493{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T18:51:31,095 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T18:51:31,095 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@48743db4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T18:51:31,095 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5a107105{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/hadoop.log.dir/,STOPPED} 2024-11-22T18:51:31,097 WARN [BP-618561351-172.17.0.2-1732301478132 heartbeating to localhost/127.0.0.1:44035 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T18:51:31,097 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-22T18:51:31,097 WARN [BP-618561351-172.17.0.2-1732301478132 heartbeating to localhost/127.0.0.1:44035 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-618561351-172.17.0.2-1732301478132 (Datanode Uuid e9500cb8-11ed-459d-aef9-7ee7bed7a3fa) service to localhost/127.0.0.1:44035 2024-11-22T18:51:31,097 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T18:51:31,097 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/data/data1/current/BP-618561351-172.17.0.2-1732301478132 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T18:51:31,098 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/data/data2/current/BP-618561351-172.17.0.2-1732301478132 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T18:51:31,098 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T18:51:31,102 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1732301480289.e10faa3d3c31514930ebfe5adc82aa17., hostname=d79ba0c344fb,42437,1732301479035, seqNum=2] 2024-11-22T18:51:31,105 ERROR [FSHLog-0-hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e-prefix:d79ba0c344fb,42437,1732301479035 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:31,105 WARN [FSHLog-0-hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e-prefix:d79ba0c344fb,42437,1732301479035 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:31,105 INFO [regionserver/d79ba0c344fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T18:51:31,105 DEBUG [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog d79ba0c344fb%2C42437%2C1732301479035:(num 1732301479480) roll requested 2024-11-22T18:51:31,105 INFO [regionserver/d79ba0c344fb:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor d79ba0c344fb%2C42437%2C1732301479035.1732301491105 2024-11-22T18:51:31,112 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:31,112 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:31,112 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:31,112 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:31,112 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:31,112 INFO [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.1732301479480 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.1732301491105 2024-11-22T18:51:31,113 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:31,113 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
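The roll above is driven internally by AbstractWALRoller after the append failure, after which the old writer cannot even be closed cleanly because its pipeline is gone. For completeness, the same kind of roll can also be requested from the outside through the client Admin API; a minimal sketch, assuming the HBase 2.x Admin interface and a server name in the "host,port,startcode" form that appears in the WAL paths above, looks like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RollWalForServer {
    // Ask one region server to roll its WAL from the client side.
    public static void rollWal(String serverNameString) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            admin.rollWALWriter(ServerName.valueOf(serverNameString));
        }
    }
}

For example, ServerName.valueOf("d79ba0c344fb,42437,1732301479035") would address the region server whose WALs appear above; in this run, though, the roller performs the roll on its own, so the sketch only shows which public entry point exercises the same path.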
2024-11-22T18:51:31,114 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-22T18:51:31,114 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-22T18:51:31,114 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.1732301479480 2024-11-22T18:51:31,117 DEBUG [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35069:35069),(127.0.0.1/127.0.0.1:33211:33211)] 2024-11-22T18:51:31,117 DEBUG [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.1732301479480 is not closed yet, will try archiving it next time 2024-11-22T18:51:31,122 WARN [IPC Server handler 1 on default port 44035 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.1732301479480 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1009 2024-11-22T18:51:31,126 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.1732301479480 after 10ms 2024-11-22T18:51:31,128 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:32,229 INFO [regionserver/d79ba0c344fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
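The Close-WAL-Writer-0 records above show lease recovery on the abandoned WAL file: RecoverLeaseFSUtils resolves org.apache.hadoop.fs.LeaseRecoverable.recoverLease(), the NameNode answers that recovery is still in progress (RecoveryId = 1019), and attempt=0 is therefore retried later. A small self-contained sketch of the same retry-until-closed pattern against the public DistributedFileSystem API is below; the timeout and one-second sleep are illustrative, and HBase's own RecoverLeaseFSUtils adds more careful pacing and logging than this.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoveryExample {
    // Repeatedly ask the NameNode to recover the lease on a file until it is
    // closed, sleeping between attempts. Timings are illustrative only.
    public static boolean recoverLease(Configuration conf, String file,
                                       long timeoutMs) throws Exception {
        Path path = new Path(file);
        FileSystem fs = path.getFileSystem(conf);
        if (!(fs instanceof DistributedFileSystem)) {
            return true; // nothing to recover on non-HDFS filesystems
        }
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (System.currentTimeMillis() < deadline) {
            if (dfs.recoverLease(path)) {
                return true; // lease released, file is now closed
            }
            Thread.sleep(1000L); // NameNode reported recovery still in progress
        }
        return false;
    }
}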
2024-11-22T18:51:33,117 INFO [regionserver/d79ba0c344fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:33,118 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.1732301491105 2024-11-22T18:51:33,119 WARN [ResponseProcessor for block BP-618561351-172.17.0.2-1732301478132:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-618561351-172.17.0.2-1732301478132:blk_1073741838_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:33,119 WARN [DataStreamer for file /user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.1732301491105 block BP-618561351-172.17.0.2-1732301478132:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-618561351-172.17.0.2-1732301478132:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46699,DS-981ab2ca-7430-42e8-9913-2bb76ba87bc8,DISK], DatanodeInfoWithStorage[127.0.0.1:33799,DS-8585a096-2fa1-4304-9e66-228a52d53d0d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46699,DS-981ab2ca-7430-42e8-9913-2bb76ba87bc8,DISK]) is bad. 2024-11-22T18:51:33,120 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-463490505_22 at /127.0.0.1:56532 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:46699:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56532 dst: /127.0.0.1:46699 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] 
at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:51:33,120 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-463490505_22 at /127.0.0.1:60686 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:33799:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60686 dst: /127.0.0.1:33799 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
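The EOF, ClosedChannelException, and "Premature EOF" errors above are the immediate client- and datanode-side fallout of the test stopping another datanode while a block of the new WAL was still being written; the shutdown of that datanode's Jetty endpoints and block pool service is logged just below. A minimal sketch of how such a datanode death is typically provoked in-process, assuming the MiniDFSCluster utility from the hadoop-hdfs tests artifact that the log references, is:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class KillDatanodeExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // Small in-process HDFS of the kind HBase tests run against.
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(3)
            .build();
        try {
            cluster.waitActive();
            // Stop one datanode while clients are writing; this is the kind of
            // event that produces the pipeline errors seen in the log above.
            cluster.stopDataNode(0);
        } finally {
            cluster.shutdown();
        }
    }
}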
2024-11-22T18:51:33,121 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5b5be5aa{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T18:51:33,122 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7f7383da{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T18:51:33,122 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T18:51:33,122 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@167a7fde{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T18:51:33,122 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@519de6b2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/hadoop.log.dir/,STOPPED} 2024-11-22T18:51:33,124 WARN [BP-618561351-172.17.0.2-1732301478132 heartbeating to localhost/127.0.0.1:44035 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T18:51:33,124 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-22T18:51:33,124 WARN [BP-618561351-172.17.0.2-1732301478132 heartbeating to localhost/127.0.0.1:44035 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-618561351-172.17.0.2-1732301478132 (Datanode Uuid be31cbf1-fe93-4a70-ac33-1973209f892f) service to localhost/127.0.0.1:44035 2024-11-22T18:51:33,124 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T18:51:33,125 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/data/data8/current/BP-618561351-172.17.0.2-1732301478132 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T18:51:33,125 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/data/data7/current/BP-618561351-172.17.0.2-1732301478132 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T18:51:33,125 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T18:51:33,128 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:34,229 INFO [regionserver/d79ba0c344fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:35,118 WARN [regionserver/d79ba0c344fb:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33799,DS-8585a096-2fa1-4304-9e66-228a52d53d0d,DISK]] 2024-11-22T18:51:35,118 INFO [regionserver/d79ba0c344fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:35,118 DEBUG [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog d79ba0c344fb%2C42437%2C1732301479035:(num 1732301491105) roll requested 2024-11-22T18:51:35,118 INFO [regionserver/d79ba0c344fb:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor d79ba0c344fb%2C42437%2C1732301479035.1732301495118 2024-11-22T18:51:35,121 WARN [Thread-911 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1021 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:35,121 WARN [Thread-911 {}] hdfs.DataStreamer(1731): Error Recovery for BP-618561351-172.17.0.2-1732301478132:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33287,DS-836af7c1-fa42-4527-b4b7-c2fbb82a8f52,DISK], DatanodeInfoWithStorage[127.0.0.1:33799,DS-8585a096-2fa1-4304-9e66-228a52d53d0d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33287,DS-836af7c1-fa42-4527-b4b7-c2fbb82a8f52,DISK]) is bad. 2024-11-22T18:51:35,121 WARN [Thread-911 {}] hdfs.DataStreamer(1850): Abandoning BP-618561351-172.17.0.2-1732301478132:blk_1073741839_1021 2024-11-22T18:51:35,124 WARN [Thread-911 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33287,DS-836af7c1-fa42-4527-b4b7-c2fbb82a8f52,DISK] 2024-11-22T18:51:35,126 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.1732301479480 after 4012ms 2024-11-22T18:51:35,128 WARN [Thread-911 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46699 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:35,128 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-463490505_22 at /127.0.0.1:60716 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741840_1022] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/data/data6]'}, localName='127.0.0.1:33799', datanodeUuid='9bd3246e-f722-407b-b012-5ddb9136aa7e', xmitsInProgress=0}:Exception transferring block BP-618561351-172.17.0.2-1732301478132:blk_1073741840_1022 to mirror 127.0.0.1:46699 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:51:35,128 WARN [Thread-911 {}] hdfs.DataStreamer(1731): Error Recovery for BP-618561351-172.17.0.2-1732301478132:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33799,DS-8585a096-2fa1-4304-9e66-228a52d53d0d,DISK], DatanodeInfoWithStorage[127.0.0.1:46699,DS-981ab2ca-7430-42e8-9913-2bb76ba87bc8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46699,DS-981ab2ca-7430-42e8-9913-2bb76ba87bc8,DISK]) is bad. 2024-11-22T18:51:35,128 WARN [Thread-911 {}] hdfs.DataStreamer(1850): Abandoning BP-618561351-172.17.0.2-1732301478132:blk_1073741840_1022 2024-11-22T18:51:35,128 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-463490505_22 at /127.0.0.1:60716 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741840_1022] {}] datanode.BlockReceiver(316): Block 1073741840 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-22T18:51:35,128 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-463490505_22 at /127.0.0.1:60716 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741840_1022] {}] datanode.DataXceiver(331): 127.0.0.1:33799:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60716 dst: /127.0.0.1:33799 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:51:35,129 WARN [Thread-911 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46699,DS-981ab2ca-7430-42e8-9913-2bb76ba87bc8,DISK] 2024-11-22T18:51:35,129 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:35,130 WARN [Thread-911 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:35,130 WARN [Thread-911 {}] hdfs.DataStreamer(1731): Error Recovery for BP-618561351-172.17.0.2-1732301478132:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK], DatanodeInfoWithStorage[127.0.0.1:33799,DS-8585a096-2fa1-4304-9e66-228a52d53d0d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]) is bad. 2024-11-22T18:51:35,130 WARN [Thread-911 {}] hdfs.DataStreamer(1850): Abandoning BP-618561351-172.17.0.2-1732301478132:blk_1073741841_1023 2024-11-22T18:51:35,130 WARN [Thread-911 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK] 2024-11-22T18:51:35,131 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T18:51:35,134 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:35,135 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:35,135 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:35,135 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:35,135 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:35,135 INFO [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.1732301491105 with entries=3, filesize=3.51 KB; new WAL /user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.1732301495118 2024-11-22T18:51:35,136 DEBUG [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45083:45083),(127.0.0.1/127.0.0.1:33211:33211)] 2024-11-22T18:51:35,136 DEBUG [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.1732301479480 is not closed yet, will try archiving it next time 2024-11-22T18:51:35,136 DEBUG [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractFSWAL(879): 
hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.1732301491105 is not closed yet, will try archiving it next time 2024-11-22T18:51:35,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33799 is added to blk_1073741838_1020 (size=3600) 2024-11-22T18:51:35,538 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.1732301479480 is not closed yet, will try archiving it next time 2024-11-22T18:51:36,230 INFO [regionserver/d79ba0c344fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:36,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40553 is added to blk_1073741838_1020 (size=3600) 2024-11-22T18:51:37,129 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:37,135 WARN [ResponseProcessor for block BP-618561351-172.17.0.2-1732301478132:blk_1073741842_1024 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-618561351-172.17.0.2-1732301478132:blk_1073741842_1024 java.io.IOException: Bad response ERROR for BP-618561351-172.17.0.2-1732301478132:blk_1073741842_1024 from datanode DatanodeInfoWithStorage[127.0.0.1:33799,DS-8585a096-2fa1-4304-9e66-228a52d53d0d,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T18:51:37,136 WARN [DataStreamer for file /user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.1732301495118 block BP-618561351-172.17.0.2-1732301478132:blk_1073741842_1024 {}] hdfs.DataStreamer(1731): Error Recovery for BP-618561351-172.17.0.2-1732301478132:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40553,DS-66ee2b9c-500a-4ee8-b156-fc240374cf40,DISK], DatanodeInfoWithStorage[127.0.0.1:33799,DS-8585a096-2fa1-4304-9e66-228a52d53d0d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33799,DS-8585a096-2fa1-4304-9e66-228a52d53d0d,DISK]) is bad. 2024-11-22T18:51:37,136 WARN [PacketResponder: BP-618561351-172.17.0.2-1732301478132:blk_1073741842_1024, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:33799] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:51:37,136 WARN [regionserver/d79ba0c344fb:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40553,DS-66ee2b9c-500a-4ee8-b156-fc240374cf40,DISK]] 2024-11-22T18:51:37,136 INFO [regionserver/d79ba0c344fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:37,136 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-463490505_22 at /127.0.0.1:37460 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:40553:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37460 dst: /127.0.0.1:40553 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:51:37,136 DEBUG [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog d79ba0c344fb%2C42437%2C1732301479035:(num 1732301495118) roll requested 2024-11-22T18:51:37,136 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-463490505_22 at /127.0.0.1:60718 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:33799:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60718 dst: /127.0.0.1:33799 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:51:37,137 INFO [regionserver/d79ba0c344fb:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor d79ba0c344fb%2C42437%2C1732301479035.1732301497136 2024-11-22T18:51:37,138 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2047cbbb{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T18:51:37,138 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2021586{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T18:51:37,138 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T18:51:37,138 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ff5703b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T18:51:37,138 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6d0f4a9a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/hadoop.log.dir/,STOPPED} 2024-11-22T18:51:37,140 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T18:51:37,140 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-618561351-172.17.0.2-1732301478132:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46699,DS-981ab2ca-7430-42e8-9913-2bb76ba87bc8,DISK], DatanodeInfoWithStorage[127.0.0.1:33287,DS-836af7c1-fa42-4527-b4b7-c2fbb82a8f52,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46699,DS-981ab2ca-7430-42e8-9913-2bb76ba87bc8,DISK]) is bad. 2024-11-22T18:51:37,140 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-618561351-172.17.0.2-1732301478132:blk_1073741843_1026 2024-11-22T18:51:37,140 WARN [BP-618561351-172.17.0.2-1732301478132 heartbeating to localhost/127.0.0.1:44035 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T18:51:37,140 WARN [BP-618561351-172.17.0.2-1732301478132 heartbeating to localhost/127.0.0.1:44035 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-618561351-172.17.0.2-1732301478132 (Datanode Uuid 9bd3246e-f722-407b-b012-5ddb9136aa7e) service to localhost/127.0.0.1:44035 2024-11-22T18:51:37,141 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46699,DS-981ab2ca-7430-42e8-9913-2bb76ba87bc8,DISK] 2024-11-22T18:51:37,141 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/data/data5/current/BP-618561351-172.17.0.2-1732301478132 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T18:51:37,141 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/data/data6/current/BP-618561351-172.17.0.2-1732301478132 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T18:51:37,141 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-22T18:51:37,141 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T18:51:37,141 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T18:51:37,143 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33287 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
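At this point the writer is abandoning block after block because every pipeline it tries includes a datanode that is already gone (connection refused, or a firstBadLink ack from the surviving mirror), and shortly below the NameNode itself warns that it can no longer place two replicas. A quick way to see which datanodes the NameNode still considers live, assuming the DistributedFileSystem datanode-report API and the NameNode address that appears throughout this log, is sketched here:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;

public class LiveDatanodeReport {
    // Print the datanodes the NameNode currently considers live; with only one
    // or two left, block allocation for replication factor 2 starts failing in
    // the way the surrounding records show.
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:44035"); // NameNode port from this log
        try (FileSystem fs = FileSystem.get(conf)) {
            DistributedFileSystem dfs = (DistributedFileSystem) fs; // hdfs:// scheme, so safe
            for (DatanodeInfo dn : dfs.getDataNodeStats(DatanodeReportType.LIVE)) {
                System.out.println(dn.getXferAddr());
            }
        }
    }
}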
2024-11-22T18:51:37,143 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-463490505_22 at /127.0.0.1:37480 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741844_1027] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/data/data10]'}, localName='127.0.0.1:40553', datanodeUuid='21bac189-52c3-492e-8931-114bf278e511', xmitsInProgress=0}:Exception transferring block BP-618561351-172.17.0.2-1732301478132:blk_1073741844_1027 to mirror 127.0.0.1:33287 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:51:37,144 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-618561351-172.17.0.2-1732301478132:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40553,DS-66ee2b9c-500a-4ee8-b156-fc240374cf40,DISK], DatanodeInfoWithStorage[127.0.0.1:33287,DS-836af7c1-fa42-4527-b4b7-c2fbb82a8f52,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33287,DS-836af7c1-fa42-4527-b4b7-c2fbb82a8f52,DISK]) is bad. 2024-11-22T18:51:37,144 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-618561351-172.17.0.2-1732301478132:blk_1073741844_1027 2024-11-22T18:51:37,144 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-463490505_22 at /127.0.0.1:37480 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741844_1027] {}] datanode.BlockReceiver(316): Block 1073741844 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-22T18:51:37,144 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-463490505_22 at /127.0.0.1:37480 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741844_1027] {}] datanode.DataXceiver(331): 127.0.0.1:40553:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37480 dst: /127.0.0.1:40553 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:51:37,144 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33287,DS-836af7c1-fa42-4527-b4b7-c2fbb82a8f52,DISK] 2024-11-22T18:51:37,145 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:37,145 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-618561351-172.17.0.2-1732301478132:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33799,DS-8585a096-2fa1-4304-9e66-228a52d53d0d,DISK], DatanodeInfoWithStorage[127.0.0.1:40553,DS-66ee2b9c-500a-4ee8-b156-fc240374cf40,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33799,DS-8585a096-2fa1-4304-9e66-228a52d53d0d,DISK]) is bad. 2024-11-22T18:51:37,145 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-618561351-172.17.0.2-1732301478132:blk_1073741845_1028 2024-11-22T18:51:37,146 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33799,DS-8585a096-2fa1-4304-9e66-228a52d53d0d,DISK] 2024-11-22T18:51:37,148 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:37,148 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-618561351-172.17.0.2-1732301478132:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK], DatanodeInfoWithStorage[127.0.0.1:40553,DS-66ee2b9c-500a-4ee8-b156-fc240374cf40,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]) is bad. 2024-11-22T18:51:37,148 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-618561351-172.17.0.2-1732301478132:blk_1073741846_1029 2024-11-22T18:51:37,148 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK] 2024-11-22T18:51:37,149 WARN [IPC Server handler 3 on default port 44035 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-22T18:51:37,149 WARN [IPC Server handler 3 on default port 44035 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-22T18:51:37,149 WARN [IPC Server handler 3 on default port 44035 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-22T18:51:37,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42437 {}] regionserver.HRegion(8855): Flush requested on e10faa3d3c31514930ebfe5adc82aa17 2024-11-22T18:51:37,151 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e10faa3d3c31514930ebfe5adc82aa17 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T18:51:37,153 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:37,153 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:37,153 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:37,153 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:37,153 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:37,153 INFO [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.1732301495118 with entries=5, filesize=4.96 KB; new WAL /user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.1732301497136 2024-11-22T18:51:37,155 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40553 is added to blk_1073741842_1025 (size=5092) 2024-11-22T18:51:37,159 DEBUG [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45083:45083)] 2024-11-22T18:51:37,159 DEBUG [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.1732301479480 is not closed yet, will try archiving it next time 2024-11-22T18:51:37,159 DEBUG [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.1732301495118 is not closed yet, will try archiving it next time 2024-11-22T18:51:37,175 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/.tmp/info/e2b6bc41715d402c86142e2e3a32ed57 is 1080, key is row0002/info:/1732301493126/Put/seqid=0 2024-11-22T18:51:37,177 WARN [Thread-929 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:37,177 WARN [Thread-929 {}] hdfs.DataStreamer(1731): Error Recovery for BP-618561351-172.17.0.2-1732301478132:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK], DatanodeInfoWithStorage[127.0.0.1:33799,DS-8585a096-2fa1-4304-9e66-228a52d53d0d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]) is bad. 2024-11-22T18:51:37,177 WARN [Thread-929 {}] hdfs.DataStreamer(1850): Abandoning BP-618561351-172.17.0.2-1732301478132:blk_1073741848_1031 2024-11-22T18:51:37,178 WARN [Thread-929 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK] 2024-11-22T18:51:37,179 WARN [Thread-929 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:37,179 WARN [Thread-929 {}] hdfs.DataStreamer(1731): Error Recovery for BP-618561351-172.17.0.2-1732301478132:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46699,DS-981ab2ca-7430-42e8-9913-2bb76ba87bc8,DISK], DatanodeInfoWithStorage[127.0.0.1:33287,DS-836af7c1-fa42-4527-b4b7-c2fbb82a8f52,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46699,DS-981ab2ca-7430-42e8-9913-2bb76ba87bc8,DISK]) is bad. 2024-11-22T18:51:37,179 WARN [Thread-929 {}] hdfs.DataStreamer(1850): Abandoning BP-618561351-172.17.0.2-1732301478132:blk_1073741849_1032 2024-11-22T18:51:37,180 WARN [Thread-929 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46699,DS-981ab2ca-7430-42e8-9913-2bb76ba87bc8,DISK] 2024-11-22T18:51:37,182 WARN [Thread-929 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33287 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:37,182 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-463490505_22 at /127.0.0.1:37500 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741850_1033] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/data/data10]'}, localName='127.0.0.1:40553', datanodeUuid='21bac189-52c3-492e-8931-114bf278e511', xmitsInProgress=0}:Exception transferring block BP-618561351-172.17.0.2-1732301478132:blk_1073741850_1033 to mirror 127.0.0.1:33287 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:51:37,182 WARN [Thread-929 {}] hdfs.DataStreamer(1731): Error Recovery for BP-618561351-172.17.0.2-1732301478132:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40553,DS-66ee2b9c-500a-4ee8-b156-fc240374cf40,DISK], DatanodeInfoWithStorage[127.0.0.1:33287,DS-836af7c1-fa42-4527-b4b7-c2fbb82a8f52,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33287,DS-836af7c1-fa42-4527-b4b7-c2fbb82a8f52,DISK]) is bad. 2024-11-22T18:51:37,182 WARN [Thread-929 {}] hdfs.DataStreamer(1850): Abandoning BP-618561351-172.17.0.2-1732301478132:blk_1073741850_1033 2024-11-22T18:51:37,182 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-463490505_22 at /127.0.0.1:37500 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741850_1033] {}] datanode.BlockReceiver(316): Block 1073741850 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-22T18:51:37,182 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-463490505_22 at /127.0.0.1:37500 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741850_1033] {}] datanode.DataXceiver(331): 127.0.0.1:40553:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37500 dst: /127.0.0.1:40553 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:51:37,183 WARN [Thread-929 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33287,DS-836af7c1-fa42-4527-b4b7-c2fbb82a8f52,DISK] 2024-11-22T18:51:37,185 WARN [Thread-929 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33799 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:37,185 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-463490505_22 at /127.0.0.1:37502 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741851_1034] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/data/data10]'}, localName='127.0.0.1:40553', datanodeUuid='21bac189-52c3-492e-8931-114bf278e511', xmitsInProgress=0}:Exception transferring block BP-618561351-172.17.0.2-1732301478132:blk_1073741851_1034 to mirror 127.0.0.1:33799 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:51:37,185 WARN [Thread-929 {}] hdfs.DataStreamer(1731): Error Recovery for BP-618561351-172.17.0.2-1732301478132:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40553,DS-66ee2b9c-500a-4ee8-b156-fc240374cf40,DISK], DatanodeInfoWithStorage[127.0.0.1:33799,DS-8585a096-2fa1-4304-9e66-228a52d53d0d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33799,DS-8585a096-2fa1-4304-9e66-228a52d53d0d,DISK]) is bad. 2024-11-22T18:51:37,185 WARN [Thread-929 {}] hdfs.DataStreamer(1850): Abandoning BP-618561351-172.17.0.2-1732301478132:blk_1073741851_1034 2024-11-22T18:51:37,185 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-463490505_22 at /127.0.0.1:37502 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741851_1034] {}] datanode.BlockReceiver(316): Block 1073741851 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 
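The DataStreamer warnings above show the write pipeline losing its datanodes one by one: each ConnectException ends with the block being abandoned, the refusing node excluded, and a fresh block requested. How the HDFS client reacts to such pipeline failures is governed by the dfs.client.block.write.replace-datanode-on-failure.* settings. The following is an illustrative sketch only, not this test's configuration; the output path is a placeholder under /tmp.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative client-side settings for how a write pipeline reacts when a
// datanode refuses connections, as in the DataStreamer warnings above.
public class PipelineFailureConfigSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Two replicas, matching the "still in need of 1 to reach 2" messages.
    conf.setInt("dfs.replication", 2);
    // Keep datanode replacement enabled, but fall back to whatever part of the
    // pipeline survives instead of failing the write outright.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);

    try (FileSystem fs = FileSystem.get(conf);
         FSDataOutputStream out = fs.create(new Path("/tmp/pipeline-sketch"), (short) 2)) {
      out.writeBytes("probe"); // against a real HDFS URI, pipeline failures surface here or on close()
    }
  }
}

With best-effort set, the client keeps writing on the surviving nodes, which is roughly the situation the later "Found 1 replicas but expecting no less than 2 replicas" WAL warning describes.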
2024-11-22T18:51:37,185 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-463490505_22 at /127.0.0.1:37502 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741851_1034] {}] datanode.DataXceiver(331): 127.0.0.1:40553:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37502 dst: /127.0.0.1:40553 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:51:37,185 WARN [Thread-929 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33799,DS-8585a096-2fa1-4304-9e66-228a52d53d0d,DISK] 2024-11-22T18:51:37,186 WARN [IPC Server handler 0 on default port 44035 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-22T18:51:37,186 WARN [IPC Server handler 0 on default port 44035 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-22T18:51:37,186 WARN [IPC Server handler 0 on default port 44035 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-22T18:51:37,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40553 is added to blk_1073741852_1035 (size=10347) 2024-11-22T18:51:37,556 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.1732301479480 is not closed yet, will try archiving it next time 2024-11-22T18:51:37,589 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB 
at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/.tmp/info/e2b6bc41715d402c86142e2e3a32ed57 2024-11-22T18:51:37,597 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/.tmp/info/e2b6bc41715d402c86142e2e3a32ed57 as hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/info/e2b6bc41715d402c86142e2e3a32ed57 2024-11-22T18:51:37,602 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/info/e2b6bc41715d402c86142e2e3a32ed57, entries=5, sequenceid=11, filesize=10.1 K 2024-11-22T18:51:37,603 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for e10faa3d3c31514930ebfe5adc82aa17 in 452ms, sequenceid=11, compaction requested=false 2024-11-22T18:51:37,603 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e10faa3d3c31514930ebfe5adc82aa17: 2024-11-22T18:51:37,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42437 {}] regionserver.HRegion(8855): Flush requested on e10faa3d3c31514930ebfe5adc82aa17 2024-11-22T18:51:37,781 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e10faa3d3c31514930ebfe5adc82aa17 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-22T18:51:37,785 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/.tmp/info/7eeac4764d2a45748383b30aa015bd1e is 1080, key is row0007/info:/1732301497153/Put/seqid=0 2024-11-22T18:51:37,787 WARN [Thread-936 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
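The IPC Server handler warnings above explicitly suggest enabling DEBUG on BlockPlacementPolicy and NetworkTopology to see why none of the required DISK storages could be selected. One way to flip exactly those two loggers at runtime, without editing the logging properties, is Log4j2's Configurator; a minimal sketch:

import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.config.Configurator;

// Raises the two loggers named in the BlockPlacementPolicyDefault warning so
// the next placement failure logs its reasoning at DEBUG.
public class EnablePlacementDebug {
  public static void main(String[] args) {
    Configurator.setLevel(
        "org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy", Level.DEBUG);
    Configurator.setLevel("org.apache.hadoop.net.NetworkTopology", Level.DEBUG);
  }
}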
2024-11-22T18:51:37,787 WARN [Thread-936 {}] hdfs.DataStreamer(1731): Error Recovery for BP-618561351-172.17.0.2-1732301478132:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33799,DS-8585a096-2fa1-4304-9e66-228a52d53d0d,DISK], DatanodeInfoWithStorage[127.0.0.1:33287,DS-836af7c1-fa42-4527-b4b7-c2fbb82a8f52,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33799,DS-8585a096-2fa1-4304-9e66-228a52d53d0d,DISK]) is bad. 2024-11-22T18:51:37,787 WARN [Thread-936 {}] hdfs.DataStreamer(1850): Abandoning BP-618561351-172.17.0.2-1732301478132:blk_1073741853_1036 2024-11-22T18:51:37,788 WARN [Thread-936 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33799,DS-8585a096-2fa1-4304-9e66-228a52d53d0d,DISK] 2024-11-22T18:51:37,790 WARN [Thread-936 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:37,790 WARN [Thread-936 {}] hdfs.DataStreamer(1731): Error Recovery for BP-618561351-172.17.0.2-1732301478132:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46699,DS-981ab2ca-7430-42e8-9913-2bb76ba87bc8,DISK], DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46699,DS-981ab2ca-7430-42e8-9913-2bb76ba87bc8,DISK]) is bad. 2024-11-22T18:51:37,790 WARN [Thread-936 {}] hdfs.DataStreamer(1850): Abandoning BP-618561351-172.17.0.2-1732301478132:blk_1073741854_1037 2024-11-22T18:51:37,790 WARN [Thread-936 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46699,DS-981ab2ca-7430-42e8-9913-2bb76ba87bc8,DISK] 2024-11-22T18:51:37,791 WARN [Thread-936 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:37,791 WARN [Thread-936 {}] hdfs.DataStreamer(1731): Error Recovery for BP-618561351-172.17.0.2-1732301478132:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK], DatanodeInfoWithStorage[127.0.0.1:40553,DS-66ee2b9c-500a-4ee8-b156-fc240374cf40,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]) is bad. 2024-11-22T18:51:37,792 WARN [Thread-936 {}] hdfs.DataStreamer(1850): Abandoning BP-618561351-172.17.0.2-1732301478132:blk_1073741855_1038 2024-11-22T18:51:37,792 WARN [Thread-936 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK] 2024-11-22T18:51:37,794 WARN [Thread-936 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33287 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:37,794 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-463490505_22 at /127.0.0.1:37522 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741856_1039] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/data/data10]'}, localName='127.0.0.1:40553', datanodeUuid='21bac189-52c3-492e-8931-114bf278e511', xmitsInProgress=0}:Exception transferring block BP-618561351-172.17.0.2-1732301478132:blk_1073741856_1039 to mirror 127.0.0.1:33287 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:51:37,794 WARN [Thread-936 {}] hdfs.DataStreamer(1731): Error Recovery for BP-618561351-172.17.0.2-1732301478132:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40553,DS-66ee2b9c-500a-4ee8-b156-fc240374cf40,DISK], DatanodeInfoWithStorage[127.0.0.1:33287,DS-836af7c1-fa42-4527-b4b7-c2fbb82a8f52,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33287,DS-836af7c1-fa42-4527-b4b7-c2fbb82a8f52,DISK]) is bad. 2024-11-22T18:51:37,794 WARN [Thread-936 {}] hdfs.DataStreamer(1850): Abandoning BP-618561351-172.17.0.2-1732301478132:blk_1073741856_1039 2024-11-22T18:51:37,794 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-463490505_22 at /127.0.0.1:37522 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741856_1039] {}] datanode.BlockReceiver(316): Block 1073741856 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-22T18:51:37,794 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-463490505_22 at /127.0.0.1:37522 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741856_1039] {}] datanode.DataXceiver(331): 127.0.0.1:40553:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37522 dst: /127.0.0.1:40553 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T18:51:37,795 WARN [Thread-936 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33287,DS-836af7c1-fa42-4527-b4b7-c2fbb82a8f52,DISK] 2024-11-22T18:51:37,795 WARN [IPC Server handler 3 on default port 44035 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-22T18:51:37,795 WARN [IPC Server handler 3 on default port 44035 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-22T18:51:37,795 WARN [IPC Server handler 3 on default port 44035 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-22T18:51:37,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40553 is added to blk_1073741857_1040 (size=12506) 2024-11-22T18:51:38,199 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/.tmp/info/7eeac4764d2a45748383b30aa015bd1e 2024-11-22T18:51:38,205 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/.tmp/info/7eeac4764d2a45748383b30aa015bd1e as hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/info/7eeac4764d2a45748383b30aa015bd1e 2024-11-22T18:51:38,210 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/info/7eeac4764d2a45748383b30aa015bd1e, entries=7, sequenceid=24, filesize=12.2 K 2024-11-22T18:51:38,211 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for e10faa3d3c31514930ebfe5adc82aa17 in 431ms, sequenceid=24, compaction requested=false 2024-11-22T18:51:38,212 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e10faa3d3c31514930ebfe5adc82aa17: 2024-11-22T18:51:38,212 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should 
split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-22T18:51:38,212 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T18:51:38,212 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/info/7eeac4764d2a45748383b30aa015bd1e because midkey is the same as first or last row 2024-11-22T18:51:38,230 INFO [regionserver/d79ba0c344fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:39,129 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:39,159 WARN [regionserver/d79ba0c344fb:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40553,DS-66ee2b9c-500a-4ee8-b156-fc240374cf40,DISK]] 2024-11-22T18:51:39,159 INFO [regionserver/d79ba0c344fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
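The FSHLog lines above connect the two failure modes: with only one live datanode left in the WAL's pipeline, the low-replication check fires ("Found 1 replicas but expecting no less than 2 replicas") and a roll of the WAL is requested. The sketch below shows only that comparison in isolation; the current pipeline size is passed in as a plain int, since the stream-inspection mechanism behind it is not visible in this log, and the class name is hypothetical.

// Sketch of the low-replication check behind "Requesting close of WAL" above.
// Not HBase code: the pipeline size is supplied by the caller because the real
// way of reading it off the output stream is not shown in this log.
public class LowReplicationRollSketch {
  private final int expectedReplicas;

  public LowReplicationRollSketch(int expectedReplicas) {
    this.expectedReplicas = expectedReplicas;
  }

  /** True when the WAL should be rolled onto a fresh pipeline. */
  public boolean shouldRoll(int currentPipelineSize) {
    return currentPipelineSize < expectedReplicas;
  }

  public static void main(String[] args) {
    LowReplicationRollSketch check = new LowReplicationRollSketch(2);
    // One surviving datanode, as in the single-node pipeline reported above.
    System.out.println(check.shouldRoll(1)); // true -> request a roll
  }
}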
2024-11-22T18:51:39,159 DEBUG [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog d79ba0c344fb%2C42437%2C1732301479035:(num 1732301497136) roll requested 2024-11-22T18:51:39,160 INFO [regionserver/d79ba0c344fb:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor d79ba0c344fb%2C42437%2C1732301479035.1732301499160 2024-11-22T18:51:39,163 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33287 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:39,163 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-463490505_22 at /127.0.0.1:37536 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741858_1041] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/data/data10]'}, localName='127.0.0.1:40553', datanodeUuid='21bac189-52c3-492e-8931-114bf278e511', xmitsInProgress=0}:Exception transferring block BP-618561351-172.17.0.2-1732301478132:blk_1073741858_1041 to mirror 127.0.0.1:33287 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:51:39,164 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-618561351-172.17.0.2-1732301478132:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40553,DS-66ee2b9c-500a-4ee8-b156-fc240374cf40,DISK], DatanodeInfoWithStorage[127.0.0.1:33287,DS-836af7c1-fa42-4527-b4b7-c2fbb82a8f52,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33287,DS-836af7c1-fa42-4527-b4b7-c2fbb82a8f52,DISK]) is bad. 
2024-11-22T18:51:39,164 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-618561351-172.17.0.2-1732301478132:blk_1073741858_1041 2024-11-22T18:51:39,164 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-463490505_22 at /127.0.0.1:37536 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741858_1041] {}] datanode.BlockReceiver(316): Block 1073741858 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-22T18:51:39,164 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-463490505_22 at /127.0.0.1:37536 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741858_1041] {}] datanode.DataXceiver(331): 127.0.0.1:40553:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37536 dst: /127.0.0.1:40553 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:51:39,164 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33287,DS-836af7c1-fa42-4527-b4b7-c2fbb82a8f52,DISK] 2024-11-22T18:51:39,165 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:39,166 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-618561351-172.17.0.2-1732301478132:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46699,DS-981ab2ca-7430-42e8-9913-2bb76ba87bc8,DISK], DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46699,DS-981ab2ca-7430-42e8-9913-2bb76ba87bc8,DISK]) is bad. 
2024-11-22T18:51:39,166 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-618561351-172.17.0.2-1732301478132:blk_1073741859_1042 2024-11-22T18:51:39,166 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46699,DS-981ab2ca-7430-42e8-9913-2bb76ba87bc8,DISK] 2024-11-22T18:51:39,167 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:39,168 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-618561351-172.17.0.2-1732301478132:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK], DatanodeInfoWithStorage[127.0.0.1:40553,DS-66ee2b9c-500a-4ee8-b156-fc240374cf40,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]) is bad. 2024-11-22T18:51:39,168 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-618561351-172.17.0.2-1732301478132:blk_1073741860_1043 2024-11-22T18:51:39,168 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK] 2024-11-22T18:51:39,170 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-463490505_22 at /127.0.0.1:37548 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741861_1044] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/data/data10]'}, localName='127.0.0.1:40553', datanodeUuid='21bac189-52c3-492e-8931-114bf278e511', xmitsInProgress=0}:Exception transferring block BP-618561351-172.17.0.2-1732301478132:blk_1073741861_1044 to mirror 127.0.0.1:33799 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:51:39,170 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33799 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:39,170 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-463490505_22 at /127.0.0.1:37548 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741861_1044] {}] datanode.BlockReceiver(316): Block 1073741861 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-22T18:51:39,171 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-618561351-172.17.0.2-1732301478132:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40553,DS-66ee2b9c-500a-4ee8-b156-fc240374cf40,DISK], DatanodeInfoWithStorage[127.0.0.1:33799,DS-8585a096-2fa1-4304-9e66-228a52d53d0d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33799,DS-8585a096-2fa1-4304-9e66-228a52d53d0d,DISK]) is bad. 2024-11-22T18:51:39,171 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-618561351-172.17.0.2-1732301478132:blk_1073741861_1044 2024-11-22T18:51:39,171 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-463490505_22 at /127.0.0.1:37548 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741861_1044] {}] datanode.DataXceiver(331): 127.0.0.1:40553:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37548 dst: /127.0.0.1:40553 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:51:39,171 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33799,DS-8585a096-2fa1-4304-9e66-228a52d53d0d,DISK] 2024-11-22T18:51:39,172 WARN [IPC Server handler 4 on default port 44035 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-22T18:51:39,172 WARN [IPC Server handler 4 on default port 44035 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-22T18:51:39,172 WARN [IPC Server handler 4 on default port 44035 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-22T18:51:39,175 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:39,176 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:39,176 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:39,176 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:39,176 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:39,176 INFO [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.1732301497136 with entries=19, filesize=19.35 KB; new WAL /user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.1732301499160 2024-11-22T18:51:39,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40553 is added to blk_1073741847_1030 (size=19824) 2024-11-22T18:51:39,181 DEBUG [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45083:45083)] 2024-11-22T18:51:39,181 DEBUG [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.1732301479480 is not closed yet, will try archiving it next time 2024-11-22T18:51:39,181 DEBUG [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.1732301497136 is 
not closed yet, will try archiving it next time 2024-11-22T18:51:39,182 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.1732301491105 to hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/oldWALs/d79ba0c344fb%2C42437%2C1732301479035.1732301491105 2024-11-22T18:51:39,183 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.1732301495118 to hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/oldWALs/d79ba0c344fb%2C42437%2C1732301479035.1732301495118 2024-11-22T18:51:39,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42437 {}] regionserver.HRegion(8855): Flush requested on e10faa3d3c31514930ebfe5adc82aa17 2024-11-22T18:51:39,201 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e10faa3d3c31514930ebfe5adc82aa17 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-22T18:51:39,205 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/.tmp/info/98f17b0c5f984f0d8f945a7d89f93ef1 is 1079, key is tmprow/info:/1732301499200/Put/seqid=0 2024-11-22T18:51:39,207 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:39,207 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-618561351-172.17.0.2-1732301478132:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33799,DS-8585a096-2fa1-4304-9e66-228a52d53d0d,DISK], DatanodeInfoWithStorage[127.0.0.1:33287,DS-836af7c1-fa42-4527-b4b7-c2fbb82a8f52,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33799,DS-8585a096-2fa1-4304-9e66-228a52d53d0d,DISK]) is bad. 
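The WAL-Archive-0 lines above move rolled WAL files out of the active WALs directory into oldWALs once they are no longer needed. A minimal sketch of that archive step as a plain rename, assuming hypothetical directories under /tmp rather than the test's real layout:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch of the archive step behind "Archiving ... to ...oldWALs" above.
// All paths are hypothetical stand-ins for the test's directories.
public class WalArchiveSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path rolledWal = new Path("/tmp/wal-archive-sketch/WALs/server-1/wal.0000000001");
    Path archiveDir = new Path("/tmp/wal-archive-sketch/oldWALs");

    fs.createNewFile(rolledWal);            // stand-in for a rolled, closed WAL
    fs.mkdirs(archiveDir);
    Path target = new Path(archiveDir, rolledWal.getName());
    if (!fs.rename(rolledWal, target)) {    // on HDFS this rename is atomic
      throw new java.io.IOException("failed to archive " + rolledWal);
    }
  }
}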
2024-11-22T18:51:39,207 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-618561351-172.17.0.2-1732301478132:blk_1073741863_1046 2024-11-22T18:51:39,208 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33799,DS-8585a096-2fa1-4304-9e66-228a52d53d0d,DISK] 2024-11-22T18:51:39,210 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-463490505_22 at /127.0.0.1:37562 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741864_1047] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/data/data10]'}, localName='127.0.0.1:40553', datanodeUuid='21bac189-52c3-492e-8931-114bf278e511', xmitsInProgress=0}:Exception transferring block BP-618561351-172.17.0.2-1732301478132:blk_1073741864_1047 to mirror 127.0.0.1:40677 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:51:39,210 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40677 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:39,210 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-618561351-172.17.0.2-1732301478132:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40553,DS-66ee2b9c-500a-4ee8-b156-fc240374cf40,DISK], DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]) is bad. 
2024-11-22T18:51:39,210 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-463490505_22 at /127.0.0.1:37562 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741864_1047] {}] datanode.BlockReceiver(316): Block 1073741864 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-22T18:51:39,210 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-618561351-172.17.0.2-1732301478132:blk_1073741864_1047 2024-11-22T18:51:39,210 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-463490505_22 at /127.0.0.1:37562 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741864_1047] {}] datanode.DataXceiver(331): 127.0.0.1:40553:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37562 dst: /127.0.0.1:40553 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:51:39,211 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK] 2024-11-22T18:51:39,212 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:39,212 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-618561351-172.17.0.2-1732301478132:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46699,DS-981ab2ca-7430-42e8-9913-2bb76ba87bc8,DISK], DatanodeInfoWithStorage[127.0.0.1:40553,DS-66ee2b9c-500a-4ee8-b156-fc240374cf40,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46699,DS-981ab2ca-7430-42e8-9913-2bb76ba87bc8,DISK]) is bad. 
2024-11-22T18:51:39,212 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-618561351-172.17.0.2-1732301478132:blk_1073741865_1048 2024-11-22T18:51:39,212 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46699,DS-981ab2ca-7430-42e8-9913-2bb76ba87bc8,DISK] 2024-11-22T18:51:39,213 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:39,213 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-618561351-172.17.0.2-1732301478132:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33287,DS-836af7c1-fa42-4527-b4b7-c2fbb82a8f52,DISK], DatanodeInfoWithStorage[127.0.0.1:40553,DS-66ee2b9c-500a-4ee8-b156-fc240374cf40,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33287,DS-836af7c1-fa42-4527-b4b7-c2fbb82a8f52,DISK]) is bad. 2024-11-22T18:51:39,213 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-618561351-172.17.0.2-1732301478132:blk_1073741866_1049 2024-11-22T18:51:39,214 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33287,DS-836af7c1-fa42-4527-b4b7-c2fbb82a8f52,DISK] 2024-11-22T18:51:39,214 WARN [IPC Server handler 1 on default port 44035 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-22T18:51:39,214 WARN [IPC Server handler 1 on default port 44035 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-22T18:51:39,214 WARN [IPC Server handler 1 on default port 44035 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-22T18:51:39,217 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40553 is added to blk_1073741867_1050 (size=6027) 2024-11-22T18:51:39,579 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.1732301479480 is not closed yet, will try archiving it next time 2024-11-22T18:51:39,618 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/.tmp/info/98f17b0c5f984f0d8f945a7d89f93ef1 2024-11-22T18:51:39,625 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/.tmp/info/98f17b0c5f984f0d8f945a7d89f93ef1 as hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/info/98f17b0c5f984f0d8f945a7d89f93ef1 2024-11-22T18:51:39,631 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/info/98f17b0c5f984f0d8f945a7d89f93ef1, entries=1, sequenceid=34, filesize=5.9 K 2024-11-22T18:51:39,632 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for e10faa3d3c31514930ebfe5adc82aa17 in 431ms, sequenceid=34, compaction requested=true 2024-11-22T18:51:39,632 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e10faa3d3c31514930ebfe5adc82aa17: 2024-11-22T18:51:39,632 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-22T18:51:39,632 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T18:51:39,632 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/info/7eeac4764d2a45748383b30aa015bd1e because midkey is the same as first or last row 2024-11-22T18:51:39,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e10faa3d3c31514930ebfe5adc82aa17:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T18:51:39,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T18:51:39,633 DEBUG [RS:0;d79ba0c344fb:42437-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T18:51:39,634 DEBUG [RS:0;d79ba0c344fb:42437-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm 
has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T18:51:39,634 DEBUG [RS:0;d79ba0c344fb:42437-shortCompactions-0 {}] regionserver.HStore(1541): e10faa3d3c31514930ebfe5adc82aa17/info is initiating minor compaction (all files) 2024-11-22T18:51:39,634 INFO [RS:0;d79ba0c344fb:42437-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e10faa3d3c31514930ebfe5adc82aa17/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732301480289.e10faa3d3c31514930ebfe5adc82aa17. 2024-11-22T18:51:39,634 INFO [RS:0;d79ba0c344fb:42437-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/info/e2b6bc41715d402c86142e2e3a32ed57, hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/info/7eeac4764d2a45748383b30aa015bd1e, hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/info/98f17b0c5f984f0d8f945a7d89f93ef1] into tmpdir=hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/.tmp, totalSize=28.2 K 2024-11-22T18:51:39,634 DEBUG [RS:0;d79ba0c344fb:42437-shortCompactions-0 {}] compactions.Compactor(225): Compacting e2b6bc41715d402c86142e2e3a32ed57, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732301493126 2024-11-22T18:51:39,635 DEBUG [RS:0;d79ba0c344fb:42437-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7eeac4764d2a45748383b30aa015bd1e, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1732301497153 2024-11-22T18:51:39,635 DEBUG [RS:0;d79ba0c344fb:42437-shortCompactions-0 {}] compactions.Compactor(225): Compacting 98f17b0c5f984f0d8f945a7d89f93ef1, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732301499200 2024-11-22T18:51:39,648 INFO [RS:0;d79ba0c344fb:42437-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e10faa3d3c31514930ebfe5adc82aa17#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T18:51:39,649 DEBUG [RS:0;d79ba0c344fb:42437-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/.tmp/info/d5a752a2c67044b6a2d637f1cc90a09d is 1080, key is row0002/info:/1732301493126/Put/seqid=0 2024-11-22T18:51:39,651 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33799 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:39,651 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-463490505_22 at /127.0.0.1:37614 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741868_1051] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/data/data10]'}, localName='127.0.0.1:40553', datanodeUuid='21bac189-52c3-492e-8931-114bf278e511', xmitsInProgress=0}:Exception transferring block BP-618561351-172.17.0.2-1732301478132:blk_1073741868_1051 to mirror 127.0.0.1:33799 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:51:39,651 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-618561351-172.17.0.2-1732301478132:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40553,DS-66ee2b9c-500a-4ee8-b156-fc240374cf40,DISK], DatanodeInfoWithStorage[127.0.0.1:33799,DS-8585a096-2fa1-4304-9e66-228a52d53d0d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33799,DS-8585a096-2fa1-4304-9e66-228a52d53d0d,DISK]) is bad. 2024-11-22T18:51:39,651 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-618561351-172.17.0.2-1732301478132:blk_1073741868_1051 2024-11-22T18:51:39,651 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-463490505_22 at /127.0.0.1:37614 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741868_1051] {}] datanode.BlockReceiver(316): Block 1073741868 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 
2024-11-22T18:51:39,651 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-463490505_22 at /127.0.0.1:37614 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741868_1051] {}] datanode.DataXceiver(331): 127.0.0.1:40553:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37614 dst: /127.0.0.1:40553 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:51:39,652 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33799,DS-8585a096-2fa1-4304-9e66-228a52d53d0d,DISK] 2024-11-22T18:51:39,653 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:39,653 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-618561351-172.17.0.2-1732301478132:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33287,DS-836af7c1-fa42-4527-b4b7-c2fbb82a8f52,DISK], DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33287,DS-836af7c1-fa42-4527-b4b7-c2fbb82a8f52,DISK]) is bad. 2024-11-22T18:51:39,653 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-618561351-172.17.0.2-1732301478132:blk_1073741869_1052 2024-11-22T18:51:39,653 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33287,DS-836af7c1-fa42-4527-b4b7-c2fbb82a8f52,DISK] 2024-11-22T18:51:39,654 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:39,654 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-618561351-172.17.0.2-1732301478132:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46699,DS-981ab2ca-7430-42e8-9913-2bb76ba87bc8,DISK], DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46699,DS-981ab2ca-7430-42e8-9913-2bb76ba87bc8,DISK]) is bad. 2024-11-22T18:51:39,654 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-618561351-172.17.0.2-1732301478132:blk_1073741870_1053 2024-11-22T18:51:39,655 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46699,DS-981ab2ca-7430-42e8-9913-2bb76ba87bc8,DISK] 2024-11-22T18:51:39,656 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:39,656 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-618561351-172.17.0.2-1732301478132:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK], DatanodeInfoWithStorage[127.0.0.1:40553,DS-66ee2b9c-500a-4ee8-b156-fc240374cf40,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]) is bad. 
2024-11-22T18:51:39,656 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-618561351-172.17.0.2-1732301478132:blk_1073741871_1054 2024-11-22T18:51:39,656 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK] 2024-11-22T18:51:39,657 WARN [IPC Server handler 2 on default port 44035 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-22T18:51:39,657 WARN [IPC Server handler 2 on default port 44035 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-22T18:51:39,657 WARN [IPC Server handler 2 on default port 44035 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-22T18:51:39,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40553 is added to blk_1073741872_1055 (size=17994) 2024-11-22T18:51:40,006 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6928852c[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:40553, datanodeUuid=21bac189-52c3-492e-8931-114bf278e511, infoPort=45083, infoSecurePort=0, ipcPort=44325, storageInfo=lv=-57;cid=testClusterID;nsid=1373837700;c=1732301478132):Failed to transfer BP-618561351-172.17.0.2-1732301478132:blk_1073741842_1025 to 127.0.0.1:33799 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T18:51:40,006 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@67ef6679[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:40553, datanodeUuid=21bac189-52c3-492e-8931-114bf278e511, infoPort=45083, infoSecurePort=0, ipcPort=44325, storageInfo=lv=-57;cid=testClusterID;nsid=1373837700;c=1732301478132):Failed to transfer BP-618561351-172.17.0.2-1732301478132:blk_1073741852_1035 to 127.0.0.1:46699 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:51:40,067 DEBUG [RS:0;d79ba0c344fb:42437-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/.tmp/info/d5a752a2c67044b6a2d637f1cc90a09d as hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/info/d5a752a2c67044b6a2d637f1cc90a09d 2024-11-22T18:51:40,074 INFO [RS:0;d79ba0c344fb:42437-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e10faa3d3c31514930ebfe5adc82aa17/info of e10faa3d3c31514930ebfe5adc82aa17 into d5a752a2c67044b6a2d637f1cc90a09d(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T18:51:40,074 DEBUG [RS:0;d79ba0c344fb:42437-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e10faa3d3c31514930ebfe5adc82aa17: 2024-11-22T18:51:40,074 INFO [RS:0;d79ba0c344fb:42437-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732301480289.e10faa3d3c31514930ebfe5adc82aa17., storeName=e10faa3d3c31514930ebfe5adc82aa17/info, priority=13, startTime=1732301499632; duration=0sec 2024-11-22T18:51:40,075 DEBUG [RS:0;d79ba0c344fb:42437-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-22T18:51:40,075 DEBUG [RS:0;d79ba0c344fb:42437-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T18:51:40,075 DEBUG [RS:0;d79ba0c344fb:42437-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/info/d5a752a2c67044b6a2d637f1cc90a09d because midkey is the same as first or last row 2024-11-22T18:51:40,075 DEBUG [RS:0;d79ba0c344fb:42437-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-22T18:51:40,075 DEBUG [RS:0;d79ba0c344fb:42437-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T18:51:40,075 DEBUG [RS:0;d79ba0c344fb:42437-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/info/d5a752a2c67044b6a2d637f1cc90a09d because midkey is the same as first or last row 2024-11-22T18:51:40,075 DEBUG [RS:0;d79ba0c344fb:42437-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-22T18:51:40,075 DEBUG [RS:0;d79ba0c344fb:42437-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T18:51:40,075 DEBUG [RS:0;d79ba0c344fb:42437-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/info/d5a752a2c67044b6a2d637f1cc90a09d because midkey is the same as first or last row 2024-11-22T18:51:40,075 DEBUG [RS:0;d79ba0c344fb:42437-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T18:51:40,075 DEBUG [RS:0;d79ba0c344fb:42437-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e10faa3d3c31514930ebfe5adc82aa17:info 2024-11-22T18:51:40,230 INFO [regionserver/d79ba0c344fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:40,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42437 {}] regionserver.HRegion(8855): Flush requested on e10faa3d3c31514930ebfe5adc82aa17 2024-11-22T18:51:40,621 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e10faa3d3c31514930ebfe5adc82aa17 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-22T18:51:40,626 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/.tmp/info/eb652672f66744b98b161d4163284591 is 1079, key is tmprow/info:/1732301500620/Put/seqid=0 2024-11-22T18:51:40,629 WARN [Thread-963 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40677 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:40,629 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-463490505_22 at /127.0.0.1:37636 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741873_1056] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/data/data10]'}, localName='127.0.0.1:40553', datanodeUuid='21bac189-52c3-492e-8931-114bf278e511', xmitsInProgress=0}:Exception transferring block BP-618561351-172.17.0.2-1732301478132:blk_1073741873_1056 to mirror 127.0.0.1:40677 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:51:40,629 WARN [Thread-963 {}] hdfs.DataStreamer(1731): Error Recovery for BP-618561351-172.17.0.2-1732301478132:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40553,DS-66ee2b9c-500a-4ee8-b156-fc240374cf40,DISK], DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]) is bad. 2024-11-22T18:51:40,629 WARN [Thread-963 {}] hdfs.DataStreamer(1850): Abandoning BP-618561351-172.17.0.2-1732301478132:blk_1073741873_1056 2024-11-22T18:51:40,629 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-463490505_22 at /127.0.0.1:37636 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741873_1056] {}] datanode.BlockReceiver(316): Block 1073741873 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-22T18:51:40,630 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-463490505_22 at /127.0.0.1:37636 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741873_1056] {}] datanode.DataXceiver(331): 127.0.0.1:40553:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37636 dst: /127.0.0.1:40553 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:51:40,630 WARN [Thread-963 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK] 2024-11-22T18:51:40,631 WARN [Thread-963 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:40,631 WARN [Thread-963 {}] hdfs.DataStreamer(1731): Error Recovery for BP-618561351-172.17.0.2-1732301478132:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46699,DS-981ab2ca-7430-42e8-9913-2bb76ba87bc8,DISK], DatanodeInfoWithStorage[127.0.0.1:40553,DS-66ee2b9c-500a-4ee8-b156-fc240374cf40,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46699,DS-981ab2ca-7430-42e8-9913-2bb76ba87bc8,DISK]) is bad. 2024-11-22T18:51:40,631 WARN [Thread-963 {}] hdfs.DataStreamer(1850): Abandoning BP-618561351-172.17.0.2-1732301478132:blk_1073741874_1057 2024-11-22T18:51:40,632 WARN [Thread-963 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46699,DS-981ab2ca-7430-42e8-9913-2bb76ba87bc8,DISK] 2024-11-22T18:51:40,633 WARN [Thread-963 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:40,633 WARN [Thread-963 {}] hdfs.DataStreamer(1731): Error Recovery for BP-618561351-172.17.0.2-1732301478132:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33799,DS-8585a096-2fa1-4304-9e66-228a52d53d0d,DISK], DatanodeInfoWithStorage[127.0.0.1:40553,DS-66ee2b9c-500a-4ee8-b156-fc240374cf40,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33799,DS-8585a096-2fa1-4304-9e66-228a52d53d0d,DISK]) is bad. 2024-11-22T18:51:40,633 WARN [Thread-963 {}] hdfs.DataStreamer(1850): Abandoning BP-618561351-172.17.0.2-1732301478132:blk_1073741875_1058 2024-11-22T18:51:40,634 WARN [Thread-963 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33799,DS-8585a096-2fa1-4304-9e66-228a52d53d0d,DISK] 2024-11-22T18:51:40,635 WARN [Thread-963 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:40,635 WARN [Thread-963 {}] hdfs.DataStreamer(1731): Error Recovery for BP-618561351-172.17.0.2-1732301478132:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33287,DS-836af7c1-fa42-4527-b4b7-c2fbb82a8f52,DISK], DatanodeInfoWithStorage[127.0.0.1:40553,DS-66ee2b9c-500a-4ee8-b156-fc240374cf40,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33287,DS-836af7c1-fa42-4527-b4b7-c2fbb82a8f52,DISK]) is bad. 2024-11-22T18:51:40,635 WARN [Thread-963 {}] hdfs.DataStreamer(1850): Abandoning BP-618561351-172.17.0.2-1732301478132:blk_1073741876_1059 2024-11-22T18:51:40,635 WARN [Thread-963 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33287,DS-836af7c1-fa42-4527-b4b7-c2fbb82a8f52,DISK] 2024-11-22T18:51:40,636 WARN [IPC Server handler 3 on default port 44035 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-22T18:51:40,636 WARN [IPC Server handler 3 on default port 44035 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-22T18:51:40,636 WARN [IPC Server handler 3 on default port 44035 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-22T18:51:40,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40553 is added to blk_1073741877_1060 (size=6027) 2024-11-22T18:51:41,006 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@67ef6679[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:40553, datanodeUuid=21bac189-52c3-492e-8931-114bf278e511, infoPort=45083, infoSecurePort=0, ipcPort=44325, storageInfo=lv=-57;cid=testClusterID;nsid=1373837700;c=1732301478132):Failed to transfer BP-618561351-172.17.0.2-1732301478132:blk_1073741857_1040 to 127.0.0.1:46699 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:51:41,006 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6928852c[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:40553, datanodeUuid=21bac189-52c3-492e-8931-114bf278e511, infoPort=45083, infoSecurePort=0, ipcPort=44325, storageInfo=lv=-57;cid=testClusterID;nsid=1373837700;c=1732301478132):Failed to transfer BP-618561351-172.17.0.2-1732301478132:blk_1073741847_1030 to 127.0.0.1:40677 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T18:51:41,041 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/.tmp/info/eb652672f66744b98b161d4163284591 2024-11-22T18:51:41,047 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/.tmp/info/eb652672f66744b98b161d4163284591 as hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/info/eb652672f66744b98b161d4163284591 2024-11-22T18:51:41,053 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/info/eb652672f66744b98b161d4163284591, entries=1, sequenceid=45, filesize=5.9 K 2024-11-22T18:51:41,054 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for e10faa3d3c31514930ebfe5adc82aa17 in 433ms, sequenceid=45, compaction requested=false 2024-11-22T18:51:41,054 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e10faa3d3c31514930ebfe5adc82aa17: 2024-11-22T18:51:41,054 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-22T18:51:41,054 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T18:51:41,054 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/info/d5a752a2c67044b6a2d637f1cc90a09d because midkey is the same as first or last row 2024-11-22T18:51:41,130 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:41,182 WARN [regionserver/d79ba0c344fb:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 
2024-11-22T18:51:41,182 INFO [regionserver/d79ba0c344fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:41,237 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T18:51:41,240 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T18:51:41,241 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T18:51:41,241 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T18:51:41,241 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T18:51:41,241 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7d04364e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/hadoop.log.dir/,AVAILABLE} 2024-11-22T18:51:41,242 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@568b1686{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T18:51:41,356 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3740407e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/java.io.tmpdir/jetty-localhost-40039-hadoop-hdfs-3_4_1-tests_jar-_-any-1960008290212801303/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T18:51:41,356 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2e234cf7{HTTP/1.1, (http/1.1)}{localhost:40039} 2024-11-22T18:51:41,356 INFO [Time-limited test {}] server.Server(415): Started @126745ms 2024-11-22T18:51:41,357 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T18:51:41,461 WARN [Thread-982 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T18:51:41,469 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1059a855d3b008b9 with lease ID 0x8d42bec303907061: from storage DS-836af7c1-fa42-4527-b4b7-c2fbb82a8f52 node DatanodeRegistration(127.0.0.1:45977, datanodeUuid=cac19669-cbaf-42c7-9aa6-abad3a8bfb0e, infoPort=38309, infoSecurePort=0, ipcPort=38035, storageInfo=lv=-57;cid=testClusterID;nsid=1373837700;c=1732301478132), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-22T18:51:41,469 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1059a855d3b008b9 with lease ID 0x8d42bec303907061: from storage DS-a186da24-99c4-4b41-bd7a-c3e70c8d56cf node DatanodeRegistration(127.0.0.1:45977, datanodeUuid=cac19669-cbaf-42c7-9aa6-abad3a8bfb0e, infoPort=38309, infoSecurePort=0, ipcPort=38035, storageInfo=lv=-57;cid=testClusterID;nsid=1373837700;c=1732301478132), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T18:51:42,231 INFO [regionserver/d79ba0c344fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:43,006 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@67ef6679[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:40553, datanodeUuid=21bac189-52c3-492e-8931-114bf278e511, infoPort=45083, infoSecurePort=0, ipcPort=44325, storageInfo=lv=-57;cid=testClusterID;nsid=1373837700;c=1732301478132):Failed to transfer BP-618561351-172.17.0.2-1732301478132:blk_1073741872_1055 to 127.0.0.1:33799 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
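The repeated "Failed to transfer ... to 127.0.0.1:33799 got java.net.ConnectException: Connection refused" entries above are consistent with one datanode having been stopped on purpose, which is what the test name (TestLogRolling-testLogRollOnDatanodeDeath) suggests. A hypothetical sketch, not the test body itself, of stopping a datanode in a MiniDFSCluster so that in-flight pipelines start failing this way:

```java
import org.apache.hadoop.hdfs.MiniDFSCluster;

// Hypothetical sketch: simulate "datanode death" in a MiniDFSCluster.
// Existing write pipelines to the stopped node then fail with
// ConnectException, as in the transfer errors logged above.
public final class DatanodeDeathSketch {
  static MiniDFSCluster.DataNodeProperties killFirstDatanode(MiniDFSCluster cluster)
      throws Exception {
    // Stops the datanode process but keeps its storage directories, so a test
    // could later bring it back with cluster.restartDataNode(props, true).
    return cluster.stopDataNode(0);
  }
}
```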
2024-11-22T18:51:43,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45977 is added to blk_1073741867_1050 (size=6027) 2024-11-22T18:51:43,130 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:43,182 INFO [regionserver/d79ba0c344fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:44,005 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6928852c[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:40553, datanodeUuid=21bac189-52c3-492e-8931-114bf278e511, infoPort=45083, infoSecurePort=0, ipcPort=44325, storageInfo=lv=-57;cid=testClusterID;nsid=1373837700;c=1732301478132):Failed to transfer BP-618561351-172.17.0.2-1732301478132:blk_1073741877_1060 to 127.0.0.1:33799 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:51:44,231 INFO [regionserver/d79ba0c344fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:45,130 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:45,183 INFO [regionserver/d79ba0c344fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:46,232 INFO [regionserver/d79ba0c344fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:47,131 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:47,183 INFO [regionserver/d79ba0c344fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:48,232 INFO [regionserver/d79ba0c344fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:48,958 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-22T18:51:49,131 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:49,183 INFO [regionserver/d79ba0c344fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:49,248 ERROR [FSHLog-0-hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/MasterData-prefix:d79ba0c344fb,33031,1732301478977 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:49,249 WARN [FSHLog-0-hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/MasterData-prefix:d79ba0c344fb,33031,1732301478977 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:49,249 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog d79ba0c344fb%2C33031%2C1732301478977:(num 1732301479152) roll requested 2024-11-22T18:51:49,249 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor d79ba0c344fb%2C33031%2C1732301478977.1732301509249 2024-11-22T18:51:49,254 WARN [Thread-1003 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33799 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
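The entry above ends with a block-creation failure whose ack reports 127.0.0.1:33799 as the firstBadLink, and the entries that follow show the client abandoning that block, excluding the datanode and retrying. A simplified, hypothetical sketch of that exclude-and-retry shape; it is not Hadoop's DataStreamer, and every name in it is made up for illustration.

```java
import java.io.IOException;
import java.util.HashSet;
import java.util.Set;

// Simplified illustration of the pattern visible in the DataStreamer entries:
// when pipeline setup reports a bad first link, abandon the block, remember
// the offending datanode, and retry allocation with that node excluded.
final class PipelineRetrySketch {

  /** Hypothetical callback standing in for "ask the NameNode for a pipeline". */
  interface BlockAllocator {
    String[] allocatePipeline(Set<String> excludedNodes) throws IOException;
  }

  /** Hypothetical exception carrying the "firstBadLink" address from the ack. */
  static final class FirstBadLinkException extends IOException {
    final String badNode;
    FirstBadLinkException(String badNode) {
      super("ack with firstBadLink as " + badNode);
      this.badNode = badNode;
    }
  }

  static String[] setupWithExclusion(BlockAllocator allocator, int maxRetries)
      throws IOException {
    Set<String> excluded = new HashSet<>();
    for (int attempt = 0; attempt < maxRetries; attempt++) {
      try {
        return allocator.allocatePipeline(excluded);   // pipeline established
      } catch (FirstBadLinkException e) {
        excluded.add(e.badNode);   // mirrors "Abandoning ..." / "Excluding datanode ..."
      }
    }
    throw new IOException("could not build a pipeline; excluded=" + excluded);
  }
}
```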
2024-11-22T18:51:49,254 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1190636397_22 at /127.0.0.1:45330 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741878_1061] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/data/data4]'}, localName='127.0.0.1:45977', datanodeUuid='cac19669-cbaf-42c7-9aa6-abad3a8bfb0e', xmitsInProgress=0}:Exception transferring block BP-618561351-172.17.0.2-1732301478132:blk_1073741878_1061 to mirror 127.0.0.1:33799 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:51:49,254 WARN [Thread-1003 {}] hdfs.DataStreamer(1731): Error Recovery for BP-618561351-172.17.0.2-1732301478132:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45977,DS-836af7c1-fa42-4527-b4b7-c2fbb82a8f52,DISK], DatanodeInfoWithStorage[127.0.0.1:33799,DS-8585a096-2fa1-4304-9e66-228a52d53d0d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33799,DS-8585a096-2fa1-4304-9e66-228a52d53d0d,DISK]) is bad. 2024-11-22T18:51:49,254 WARN [Thread-1003 {}] hdfs.DataStreamer(1850): Abandoning BP-618561351-172.17.0.2-1732301478132:blk_1073741878_1061 2024-11-22T18:51:49,254 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1190636397_22 at /127.0.0.1:45330 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741878_1061] {}] datanode.BlockReceiver(316): Block 1073741878 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-22T18:51:49,254 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1190636397_22 at /127.0.0.1:45330 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741878_1061] {}] datanode.DataXceiver(331): 127.0.0.1:45977:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45330 dst: /127.0.0.1:45977 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:51:49,255 WARN [Thread-1003 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33799,DS-8585a096-2fa1-4304-9e66-228a52d53d0d,DISK] 2024-11-22T18:51:49,256 WARN [Thread-1003 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:49,256 WARN [Thread-1003 {}] hdfs.DataStreamer(1731): Error Recovery for BP-618561351-172.17.0.2-1732301478132:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK], DatanodeInfoWithStorage[127.0.0.1:45977,DS-836af7c1-fa42-4527-b4b7-c2fbb82a8f52,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]) is bad. 2024-11-22T18:51:49,256 WARN [Thread-1003 {}] hdfs.DataStreamer(1850): Abandoning BP-618561351-172.17.0.2-1732301478132:blk_1073741879_1062 2024-11-22T18:51:49,256 WARN [Thread-1003 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK] 2024-11-22T18:51:49,260 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:49,260 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:49,261 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:49,261 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:49,261 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:49,261 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/MasterData/WALs/d79ba0c344fb,33031,1732301478977/d79ba0c344fb%2C33031%2C1732301478977.1732301479152 with entries=54, filesize=26.68 KB; new WAL /user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/MasterData/WALs/d79ba0c344fb,33031,1732301478977/d79ba0c344fb%2C33031%2C1732301478977.1732301509249 2024-11-22T18:51:49,265 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:49,265 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:49,265 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/MasterData/WALs/d79ba0c344fb,33031,1732301478977/d79ba0c344fb%2C33031%2C1732301478977.1732301479152 2024-11-22T18:51:49,265 WARN [IPC Server handler 1 on default port 44035 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/MasterData/WALs/d79ba0c344fb,33031,1732301478977/d79ba0c344fb%2C33031%2C1732301478977.1732301479152 has not been closed. Lease recovery is in progress. RecoveryId = 1064 for block blk_1073741830_1006 2024-11-22T18:51:49,266 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/MasterData/WALs/d79ba0c344fb,33031,1732301478977/d79ba0c344fb%2C33031%2C1732301478977.1732301479152 after 1ms 2024-11-22T18:51:49,269 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38309:38309),(127.0.0.1/127.0.0.1:45083:45083)] 2024-11-22T18:51:49,269 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/MasterData/WALs/d79ba0c344fb,33031,1732301478977/d79ba0c344fb%2C33031%2C1732301478977.1732301479152 is not closed yet, will try archiving it next time 2024-11-22T18:51:50,233 INFO [regionserver/d79ba0c344fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:51,184 INFO [regionserver/d79ba0c344fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:51,484 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@2fdad119 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-618561351-172.17.0.2-1732301478132:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:40677,null,null]) java.net.ConnectException: Call From d79ba0c344fb/172.17.0.2 to localhost:43815 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-22T18:51:51,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45977 is added to blk_1073741833_1019 (size=455) 2024-11-22T18:51:52,142 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.1732301479480 to hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/oldWALs/d79ba0c344fb%2C42437%2C1732301479035.1732301479480 2024-11-22T18:51:52,143 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.1732301497136 to hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/oldWALs/d79ba0c344fb%2C42437%2C1732301479035.1732301497136 2024-11-22T18:51:52,233 INFO [regionserver/d79ba0c344fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T18:51:53,184 INFO [regionserver/d79ba0c344fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:53,267 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/MasterData/WALs/d79ba0c344fb,33031,1732301478977/d79ba0c344fb%2C33031%2C1732301478977.1732301479152 after 4002ms 2024-11-22T18:51:54,233 INFO [regionserver/d79ba0c344fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:54,468 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@365e639d[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45977, datanodeUuid=cac19669-cbaf-42c7-9aa6-abad3a8bfb0e, infoPort=38309, infoSecurePort=0, ipcPort=38035, storageInfo=lv=-57;cid=testClusterID;nsid=1373837700;c=1732301478132):Failed to transfer BP-618561351-172.17.0.2-1732301478132:blk_1073741833_1019 to 127.0.0.1:33799 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
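The old master WAL above could not be closed cleanly (the trailer write and the close both fail), so the close path falls back to HDFS lease recovery and polls until the NameNode releases the lease, hence "Failed to recover lease, attempt=0 ... after 1ms" and "attempt=1 ... after 4002ms". A minimal sketch of that polling pattern against the public DistributedFileSystem#recoverLease API; the helper and the pause value are illustrative, not HBase's RecoverLeaseFSUtils.

```java
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Illustrative lease-recovery polling loop. recoverLease() returns true once
// the NameNode has closed the file; until then callers typically retry with a
// pause, which matches the attempt=0 / attempt=1 entries in this log.
final class LeaseRecoverySketch {
  static boolean recoverWithRetries(DistributedFileSystem dfs, Path walFile,
      int maxAttempts, long pauseMs) throws Exception {
    for (int attempt = 0; attempt < maxAttempts; attempt++) {
      if (dfs.recoverLease(walFile)) {
        return true;            // lease released, file is now closed
      }
      Thread.sleep(pauseMs);    // e.g. the ~4s gap between attempts above
    }
    return false;
  }
}
```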
2024-11-22T18:51:54,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40553 is added to blk_1073741835_1011 (size=393) 2024-11-22T18:51:55,185 INFO [regionserver/d79ba0c344fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:55,467 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@365e639d[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45977, datanodeUuid=cac19669-cbaf-42c7-9aa6-abad3a8bfb0e, infoPort=38309, infoSecurePort=0, ipcPort=38035, storageInfo=lv=-57;cid=testClusterID;nsid=1373837700;c=1732301478132):Failed to transfer BP-618561351-172.17.0.2-1732301478132:blk_1073741829_1005 to 127.0.0.1:33799 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:51:55,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40553 is added to blk_1073741831_1007 (size=1321) 2024-11-22T18:51:56,234 INFO [regionserver/d79ba0c344fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T18:51:56,698 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor d79ba0c344fb%2C42437%2C1732301479035.1732301516698 2024-11-22T18:51:56,704 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:56,704 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:56,704 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:56,704 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:56,704 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:56,705 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.1732301499160 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.1732301516698 2024-11-22T18:51:56,705 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45083:45083),(127.0.0.1/127.0.0.1:38309:38309)] 2024-11-22T18:51:56,705 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.1732301499160 is not closed yet, will try archiving it next time 2024-11-22T18:51:56,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40553 is added to blk_1073741862_1045 (size=13591) 2024-11-22T18:51:56,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42437 {}] regionserver.HRegion(8855): Flush requested on e10faa3d3c31514930ebfe5adc82aa17 2024-11-22T18:51:56,716 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e10faa3d3c31514930ebfe5adc82aa17 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-22T18:51:56,721 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/.tmp/info/0e56632448464a1eb2ff7c6a06dce65b is 1080, key is row0013/info:/1732301516707/Put/seqid=0 2024-11-22T18:51:56,723 WARN [Thread-1024 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1066 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33799 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
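Every flush in this log carries only about 7.35 KB and produces store files of 5 to 12 KB, which suggests the test runs with a very small memstore flush threshold. A sketch of how such a threshold could be configured; hbase.hregion.memstore.flush.size is a standard HBase key, but the 8 KB value is an assumption for illustration, not something read from this run.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative only: a tiny flush threshold like this would make a region
// flush after a few KB of writes, matching the small "dataSize=7.35 KB"
// flushes above. The value actually used by the test is not visible here.
public final class SmallFlushConfigSketch {
  static Configuration tinyMemstoreFlushes() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 8 * 1024L); // 8 KB, assumed
    return conf;
  }
}
```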
2024-11-22T18:51:56,723 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-463490505_22 at /127.0.0.1:38140 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741882_1066] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/data/data10]'}, localName='127.0.0.1:40553', datanodeUuid='21bac189-52c3-492e-8931-114bf278e511', xmitsInProgress=0}:Exception transferring block BP-618561351-172.17.0.2-1732301478132:blk_1073741882_1066 to mirror 127.0.0.1:33799 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:51:56,724 WARN [Thread-1024 {}] hdfs.DataStreamer(1731): Error Recovery for BP-618561351-172.17.0.2-1732301478132:blk_1073741882_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40553,DS-66ee2b9c-500a-4ee8-b156-fc240374cf40,DISK], DatanodeInfoWithStorage[127.0.0.1:33799,DS-8585a096-2fa1-4304-9e66-228a52d53d0d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33799,DS-8585a096-2fa1-4304-9e66-228a52d53d0d,DISK]) is bad. 2024-11-22T18:51:56,724 WARN [Thread-1024 {}] hdfs.DataStreamer(1850): Abandoning BP-618561351-172.17.0.2-1732301478132:blk_1073741882_1066 2024-11-22T18:51:56,724 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-463490505_22 at /127.0.0.1:38140 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741882_1066] {}] datanode.BlockReceiver(316): Block 1073741882 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-22T18:51:56,724 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-463490505_22 at /127.0.0.1:38140 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741882_1066] {}] datanode.DataXceiver(331): 127.0.0.1:40553:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38140 dst: /127.0.0.1:40553 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T18:51:56,724 WARN [Thread-1024 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33799,DS-8585a096-2fa1-4304-9e66-228a52d53d0d,DISK]
2024-11-22T18:51:56,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45977 is added to blk_1073741883_1067 (size=11421)
2024-11-22T18:51:56,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40553 is added to blk_1073741883_1067 (size=11421)
2024-11-22T18:51:56,730 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/.tmp/info/0e56632448464a1eb2ff7c6a06dce65b
2024-11-22T18:51:56,736 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/.tmp/info/0e56632448464a1eb2ff7c6a06dce65b as hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/info/0e56632448464a1eb2ff7c6a06dce65b
2024-11-22T18:51:56,741 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/info/0e56632448464a1eb2ff7c6a06dce65b, entries=6, sequenceid=55, filesize=11.2 K
2024-11-22T18:51:56,742 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7530, heapSize ~8.11 KB/8304, currentSize=6.30 KB/6455 for e10faa3d3c31514930ebfe5adc82aa17 in 26ms, sequenceid=55, compaction requested=true
2024-11-22T18:51:56,742 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e10faa3d3c31514930ebfe5adc82aa17:
2024-11-22T18:51:56,743 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=34.6 K, sizeToCheck=16.0 K
2024-11-22T18:51:56,743 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-22T18:51:56,743 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/info/d5a752a2c67044b6a2d637f1cc90a09d because midkey is the same as first or last row
2024-11-22T18:51:56,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e10faa3d3c31514930ebfe5adc82aa17:info, priority=-2147483648, current under compaction store size is 1
2024-11-22T18:51:56,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-22T18:51:56,743 DEBUG [RS:0;d79ba0c344fb:42437-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-22T18:51:56,744 DEBUG [RS:0;d79ba0c344fb:42437-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35442 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-22T18:51:56,744 DEBUG [RS:0;d79ba0c344fb:42437-shortCompactions-0 {}] regionserver.HStore(1541): e10faa3d3c31514930ebfe5adc82aa17/info is initiating minor compaction (all files)
2024-11-22T18:51:56,744 INFO [RS:0;d79ba0c344fb:42437-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e10faa3d3c31514930ebfe5adc82aa17/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732301480289.e10faa3d3c31514930ebfe5adc82aa17.
2024-11-22T18:51:56,744 INFO [RS:0;d79ba0c344fb:42437-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/info/d5a752a2c67044b6a2d637f1cc90a09d, hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/info/eb652672f66744b98b161d4163284591, hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/info/0e56632448464a1eb2ff7c6a06dce65b] into tmpdir=hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/.tmp, totalSize=34.6 K
2024-11-22T18:51:56,745 DEBUG [RS:0;d79ba0c344fb:42437-shortCompactions-0 {}] compactions.Compactor(225): Compacting d5a752a2c67044b6a2d637f1cc90a09d, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732301493126
2024-11-22T18:51:56,745 DEBUG [RS:0;d79ba0c344fb:42437-shortCompactions-0 {}] compactions.Compactor(225): Compacting eb652672f66744b98b161d4163284591, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1732301500620
2024-11-22T18:51:56,745 DEBUG [RS:0;d79ba0c344fb:42437-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0e56632448464a1eb2ff7c6a06dce65b, keycount=6, bloomtype=ROW, size=11.2 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732301501026
2024-11-22T18:51:56,761 INFO [RS:0;d79ba0c344fb:42437-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e10faa3d3c31514930ebfe5adc82aa17#info#compaction#24 average throughput is 17.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-22T18:51:56,762 DEBUG [RS:0;d79ba0c344fb:42437-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/.tmp/info/f1cc86891d4041459de5e7cb03f5d25f is 1080, key is row0002/info:/1732301493126/Put/seqid=0
2024-11-22T18:51:56,764 WARN [Thread-1034 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1068
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-22T18:51:56,764 WARN [Thread-1034 {}] hdfs.DataStreamer(1731): Error Recovery for BP-618561351-172.17.0.2-1732301478132:blk_1073741884_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33799,DS-8585a096-2fa1-4304-9e66-228a52d53d0d,DISK], DatanodeInfoWithStorage[127.0.0.1:45977,DS-836af7c1-fa42-4527-b4b7-c2fbb82a8f52,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33799,DS-8585a096-2fa1-4304-9e66-228a52d53d0d,DISK]) is bad.
2024-11-22T18:51:56,764 WARN [Thread-1034 {}] hdfs.DataStreamer(1850): Abandoning BP-618561351-172.17.0.2-1732301478132:blk_1073741884_1068
2024-11-22T18:51:56,765 WARN [Thread-1034 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33799,DS-8585a096-2fa1-4304-9e66-228a52d53d0d,DISK]
2024-11-22T18:51:56,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40553 is added to blk_1073741885_1069 (size=23502)
2024-11-22T18:51:56,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45977 is added to blk_1073741885_1069 (size=23502)
2024-11-22T18:51:56,777 DEBUG [RS:0;d79ba0c344fb:42437-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/.tmp/info/f1cc86891d4041459de5e7cb03f5d25f as hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/info/f1cc86891d4041459de5e7cb03f5d25f
2024-11-22T18:51:56,784 INFO [RS:0;d79ba0c344fb:42437-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e10faa3d3c31514930ebfe5adc82aa17/info of e10faa3d3c31514930ebfe5adc82aa17 into f1cc86891d4041459de5e7cb03f5d25f(size=23.0 K), total size for store is 23.0 K. This selection was in queue for 0sec, and took 0sec to execute.
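The compaction above merges three store files of 17.6 K, 5.9 K and 11.2 K (totalSize=34.6 K, the 35442 bytes selected) into a single 23.0 K file, and the surrounding split-policy checks (sumSize=34.6 K before the compaction, 23.0 K after) keep concluding that size alone would justify a split over the 16.0 K sizeToCheck, yet refuse because the midkey equals the first or last row. A minimal sketch of that combined decision using the logged numbers; it mirrors the logged outcome rather than the actual ConstantSizeRegionSplitPolicy source.

```java
// Minimal sketch of the size check reflected in the
// "Should split because region size is big enough" lines above.
final class SplitCheckSketch {
  static boolean shouldSplit(long sumStoreSizeBytes, long sizeToCheckBytes,
      boolean midkeyEqualsFirstOrLastRow) {
    // Size says "split" (e.g. 23.0 K > 16.0 K), but a store whose midkey
    // equals its first or last row cannot be split into two non-empty halves.
    return sumStoreSizeBytes > sizeToCheckBytes && !midkeyEqualsFirstOrLastRow;
  }

  public static void main(String[] args) {
    long sum = 23 * 1024L;        // 23.0 K after the compaction above
    long threshold = 16 * 1024L;  // sizeToCheck=16.0 K from the log
    System.out.println(shouldSplit(sum, threshold, true));  // false: cannot split
  }
}
```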
2024-11-22T18:51:56,784 DEBUG [RS:0;d79ba0c344fb:42437-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e10faa3d3c31514930ebfe5adc82aa17: 2024-11-22T18:51:56,784 INFO [RS:0;d79ba0c344fb:42437-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732301480289.e10faa3d3c31514930ebfe5adc82aa17., storeName=e10faa3d3c31514930ebfe5adc82aa17/info, priority=13, startTime=1732301516743; duration=0sec 2024-11-22T18:51:56,784 DEBUG [RS:0;d79ba0c344fb:42437-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-11-22T18:51:56,785 DEBUG [RS:0;d79ba0c344fb:42437-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T18:51:56,785 DEBUG [RS:0;d79ba0c344fb:42437-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/info/f1cc86891d4041459de5e7cb03f5d25f because midkey is the same as first or last row 2024-11-22T18:51:56,785 DEBUG [RS:0;d79ba0c344fb:42437-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-11-22T18:51:56,785 DEBUG [RS:0;d79ba0c344fb:42437-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T18:51:56,785 DEBUG [RS:0;d79ba0c344fb:42437-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/info/f1cc86891d4041459de5e7cb03f5d25f because midkey is the same as first or last row 2024-11-22T18:51:56,785 DEBUG [RS:0;d79ba0c344fb:42437-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-11-22T18:51:56,785 DEBUG [RS:0;d79ba0c344fb:42437-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T18:51:56,785 DEBUG [RS:0;d79ba0c344fb:42437-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/info/f1cc86891d4041459de5e7cb03f5d25f because midkey is the same as first or last row 2024-11-22T18:51:56,785 DEBUG [RS:0;d79ba0c344fb:42437-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T18:51:56,785 DEBUG [RS:0;d79ba0c344fb:42437-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e10faa3d3c31514930ebfe5adc82aa17:info 2024-11-22T18:51:56,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42437 {}] regionserver.HRegion(8855): Flush requested on e10faa3d3c31514930ebfe5adc82aa17 2024-11-22T18:51:56,930 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e10faa3d3c31514930ebfe5adc82aa17 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-22T18:51:56,935 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/.tmp/info/f03eb14e1f614b16af543fabf83e47f5 is 1080, key is row0018/info:/1732301516717/Put/seqid=0 2024-11-22T18:51:56,938 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-463490505_22 at /127.0.0.1:38190 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741886_1070] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/data/data10]'}, localName='127.0.0.1:40553', datanodeUuid='21bac189-52c3-492e-8931-114bf278e511', xmitsInProgress=0}:Exception transferring block BP-618561351-172.17.0.2-1732301478132:blk_1073741886_1070 to mirror 127.0.0.1:33799 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:51:56,938 WARN [Thread-1041 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741886_1070 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33799 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:56,938 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-463490505_22 at /127.0.0.1:38190 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741886_1070] {}] datanode.BlockReceiver(316): Block 1073741886 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 
2024-11-22T18:51:56,938 WARN [Thread-1041 {}] hdfs.DataStreamer(1731): Error Recovery for BP-618561351-172.17.0.2-1732301478132:blk_1073741886_1070 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40553,DS-66ee2b9c-500a-4ee8-b156-fc240374cf40,DISK], DatanodeInfoWithStorage[127.0.0.1:33799,DS-8585a096-2fa1-4304-9e66-228a52d53d0d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33799,DS-8585a096-2fa1-4304-9e66-228a52d53d0d,DISK]) is bad. 2024-11-22T18:51:56,938 WARN [Thread-1041 {}] hdfs.DataStreamer(1850): Abandoning BP-618561351-172.17.0.2-1732301478132:blk_1073741886_1070 2024-11-22T18:51:56,938 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-463490505_22 at /127.0.0.1:38190 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741886_1070] {}] datanode.DataXceiver(331): 127.0.0.1:40553:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38190 dst: /127.0.0.1:40553 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T18:51:56,938 WARN [Thread-1041 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33799,DS-8585a096-2fa1-4304-9e66-228a52d53d0d,DISK] 2024-11-22T18:51:56,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40553 is added to blk_1073741887_1071 (size=11421) 2024-11-22T18:51:56,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45977 is added to blk_1073741887_1071 (size=11421) 2024-11-22T18:51:56,943 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/.tmp/info/f03eb14e1f614b16af543fabf83e47f5 2024-11-22T18:51:56,949 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/.tmp/info/f03eb14e1f614b16af543fabf83e47f5 as hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/info/f03eb14e1f614b16af543fabf83e47f5 2024-11-22T18:51:56,955 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/info/f03eb14e1f614b16af543fabf83e47f5, entries=6, sequenceid=66, filesize=11.2 K 2024-11-22T18:51:56,956 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7530, heapSize ~8.11 KB/8304, currentSize=0 B/0 for e10faa3d3c31514930ebfe5adc82aa17 in 26ms, sequenceid=66, compaction requested=false 2024-11-22T18:51:56,956 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e10faa3d3c31514930ebfe5adc82aa17: 2024-11-22T18:51:56,957 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=34.1 K, sizeToCheck=16.0 K 2024-11-22T18:51:56,957 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T18:51:56,957 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/info/f1cc86891d4041459de5e7cb03f5d25f because midkey is the same as first or last row 2024-11-22T18:51:57,107 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.1732301499160 to hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/oldWALs/d79ba0c344fb%2C42437%2C1732301479035.1732301499160 2024-11-22T18:51:57,185 INFO [regionserver/d79ba0c344fb:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 
2024-11-22T18:51:57,185 INFO [regionserver/d79ba0c344fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:57,334 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-22T18:51:57,334 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-22T18:51:57,335 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at 
org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T18:51:57,335 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T18:51:57,335 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T18:51:57,335 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-22T18:51:57,335 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-22T18:51:57,335 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1779764987, stopped=false 2024-11-22T18:51:57,335 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=d79ba0c344fb,33031,1732301478977 2024-11-22T18:51:57,337 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33031-0x1014104c6c00000, quorum=127.0.0.1:59152, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T18:51:57,337 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35983-0x1014104c6c00002, quorum=127.0.0.1:59152, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T18:51:57,337 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42437-0x1014104c6c00001, quorum=127.0.0.1:59152, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T18:51:57,337 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33031-0x1014104c6c00000, quorum=127.0.0.1:59152, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:51:57,337 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42437-0x1014104c6c00001, quorum=127.0.0.1:59152, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:51:57,337 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35983-0x1014104c6c00002, quorum=127.0.0.1:59152, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:51:57,337 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T18:51:57,337 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-22T18:51:57,337 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T18:51:57,338 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T18:51:57,338 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server 'd79ba0c344fb,42437,1732301479035' ***** 2024-11-22T18:51:57,338 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-22T18:51:57,338 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'd79ba0c344fb,35983,1732301480111' ***** 2024-11-22T18:51:57,338 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-22T18:51:57,338 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35983-0x1014104c6c00002, quorum=127.0.0.1:59152, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T18:51:57,338 INFO [RS:1;d79ba0c344fb:35983 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-22T18:51:57,338 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42437-0x1014104c6c00001, quorum=127.0.0.1:59152, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T18:51:57,338 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-22T18:51:57,339 INFO [RS:0;d79ba0c344fb:42437 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-22T18:51:57,339 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-22T18:51:57,339 INFO [RS:0;d79ba0c344fb:42437 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-22T18:51:57,339 INFO [RS:0;d79ba0c344fb:42437 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-22T18:51:57,339 INFO [RS:0;d79ba0c344fb:42437 {}] regionserver.HRegionServer(3091): Received CLOSE for e10faa3d3c31514930ebfe5adc82aa17 2024-11-22T18:51:57,339 INFO [RS:1;d79ba0c344fb:35983 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-22T18:51:57,339 INFO [RS:1;d79ba0c344fb:35983 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-22T18:51:57,339 INFO [RS:0;d79ba0c344fb:42437 {}] regionserver.HRegionServer(959): stopping server d79ba0c344fb,42437,1732301479035 2024-11-22T18:51:57,339 INFO [RS:1;d79ba0c344fb:35983 {}] regionserver.HRegionServer(959): stopping server d79ba0c344fb,35983,1732301480111 2024-11-22T18:51:57,339 INFO [RS:1;d79ba0c344fb:35983 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T18:51:57,339 INFO [RS:0;d79ba0c344fb:42437 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T18:51:57,339 INFO [RS:1;d79ba0c344fb:35983 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;d79ba0c344fb:35983. 2024-11-22T18:51:57,339 INFO [RS:0;d79ba0c344fb:42437 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;d79ba0c344fb:42437. 
2024-11-22T18:51:57,340 DEBUG [RS:1;d79ba0c344fb:35983 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T18:51:57,340 DEBUG [RS:1;d79ba0c344fb:35983 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T18:51:57,340 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33031-0x1014104c6c00000, quorum=127.0.0.1:59152, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T18:51:57,340 DEBUG [RS:0;d79ba0c344fb:42437 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T18:51:57,340 DEBUG [RS:0;d79ba0c344fb:42437 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T18:51:57,340 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing e10faa3d3c31514930ebfe5adc82aa17, disabling compactions & flushes 2024-11-22T18:51:57,340 INFO [RS:1;d79ba0c344fb:35983 {}] 
regionserver.HRegionServer(976): stopping server d79ba0c344fb,35983,1732301480111; all regions closed. 2024-11-22T18:51:57,340 INFO [RS:0;d79ba0c344fb:42437 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-22T18:51:57,340 INFO [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1732301480289.e10faa3d3c31514930ebfe5adc82aa17. 2024-11-22T18:51:57,340 INFO [RS:0;d79ba0c344fb:42437 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-22T18:51:57,340 INFO [RS:0;d79ba0c344fb:42437 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-22T18:51:57,340 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732301480289.e10faa3d3c31514930ebfe5adc82aa17. 2024-11-22T18:51:57,340 INFO [RS:0;d79ba0c344fb:42437 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-22T18:51:57,340 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732301480289.e10faa3d3c31514930ebfe5adc82aa17. after waiting 0 ms 2024-11-22T18:51:57,340 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1732301480289.e10faa3d3c31514930ebfe5adc82aa17. 2024-11-22T18:51:57,340 INFO [RS:0;d79ba0c344fb:42437 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-22T18:51:57,340 INFO [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing e10faa3d3c31514930ebfe5adc82aa17 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-22T18:51:57,340 DEBUG [RS:0;d79ba0c344fb:42437 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, e10faa3d3c31514930ebfe5adc82aa17=TestLogRolling-testLogRollOnDatanodeDeath,,1732301480289.e10faa3d3c31514930ebfe5adc82aa17.} 2024-11-22T18:51:57,340 DEBUG [RS:0;d79ba0c344fb:42437 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, e10faa3d3c31514930ebfe5adc82aa17 2024-11-22T18:51:57,340 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T18:51:57,340 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:57,340 INFO [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T18:51:57,341 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T18:51:57,341 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:57,341 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T18:51:57,341 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 
2024-11-22T18:51:57,341 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:57,341 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:57,341 INFO [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-22T18:51:57,341 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:57,341 ERROR [FSHLog-0-hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e-prefix:d79ba0c344fb,42437,1732301479035.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:57,341 WARN [FSHLog-0-hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e-prefix:d79ba0c344fb,42437,1732301479035.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:57,341 DEBUG [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog d79ba0c344fb%2C42437%2C1732301479035.meta:.meta(num 1732301479898) roll requested 2024-11-22T18:51:57,341 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T18:51:57,342 INFO [regionserver/d79ba0c344fb:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T18:51:57,342 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:57,342 INFO [regionserver/d79ba0c344fb:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor d79ba0c344fb%2C42437%2C1732301479035.meta.1732301517342.meta 2024-11-22T18:51:57,342 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 2024-11-22T18:51:57,342 WARN [IPC Server handler 4 on default port 44035 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 has not been closed. Lease recovery is in progress. RecoveryId = 1072 for block blk_1073741837_1013 2024-11-22T18:51:57,343 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 after 1ms 2024-11-22T18:51:57,345 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/.tmp/info/96cff278830743e6a1bd622ac0d504f6 is 1079, key is tmprow/info:/1732301517132/Put/seqid=0 2024-11-22T18:51:57,346 WARN [Thread-1048 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741888_1073 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33799 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T18:51:57,346 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-463490505_22 at /127.0.0.1:38216 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741888_1073] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/data/data10]'}, localName='127.0.0.1:40553', datanodeUuid='21bac189-52c3-492e-8931-114bf278e511', xmitsInProgress=0}:Exception transferring block BP-618561351-172.17.0.2-1732301478132:blk_1073741888_1073 to mirror 127.0.0.1:33799 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:51:57,346 WARN [Thread-1048 {}] hdfs.DataStreamer(1731): Error Recovery for BP-618561351-172.17.0.2-1732301478132:blk_1073741888_1073 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40553,DS-66ee2b9c-500a-4ee8-b156-fc240374cf40,DISK], DatanodeInfoWithStorage[127.0.0.1:33799,DS-8585a096-2fa1-4304-9e66-228a52d53d0d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33799,DS-8585a096-2fa1-4304-9e66-228a52d53d0d,DISK]) is bad. 2024-11-22T18:51:57,346 WARN [Thread-1048 {}] hdfs.DataStreamer(1850): Abandoning BP-618561351-172.17.0.2-1732301478132:blk_1073741888_1073 2024-11-22T18:51:57,346 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-463490505_22 at /127.0.0.1:38216 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741888_1073] {}] datanode.BlockReceiver(316): Block 1073741888 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-22T18:51:57,346 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-463490505_22 at /127.0.0.1:38216 [Receiving block BP-618561351-172.17.0.2-1732301478132:blk_1073741888_1073] {}] datanode.DataXceiver(331): 127.0.0.1:40553:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38216 dst: /127.0.0.1:40553 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:51:57,346 WARN [Thread-1048 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33799,DS-8585a096-2fa1-4304-9e66-228a52d53d0d,DISK] 2024-11-22T18:51:57,353 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:57,353 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:57,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40553 is added to blk_1073741889_1074 (size=6027) 2024-11-22T18:51:57,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45977 is added to blk_1073741889_1074 (size=6027) 2024-11-22T18:51:57,353 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:57,353 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:57,354 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:57,354 INFO [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301517342.meta 2024-11-22T18:51:57,354 INFO [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=70 (bloomFilter=true), to=hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/.tmp/info/96cff278830743e6a1bd622ac0d504f6 2024-11-22T18:51:57,354 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:57,354 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40677,DS-4a9dfded-d8a3-43b3-910b-b0c2b5742369,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:57,354 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta 2024-11-22T18:51:57,354 DEBUG [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38309:38309),(127.0.0.1/127.0.0.1:45083:45083)] 2024-11-22T18:51:57,354 DEBUG [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta is not closed yet, will try archiving it next time 2024-11-22T18:51:57,355 WARN [IPC Server handler 4 on default port 44035 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta has not been closed. Lease recovery is in progress. 
RecoveryId = 1076 for block blk_1073741834_1010 2024-11-22T18:51:57,355 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta after 1ms 2024-11-22T18:51:57,361 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/.tmp/info/96cff278830743e6a1bd622ac0d504f6 as hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/info/96cff278830743e6a1bd622ac0d504f6 2024-11-22T18:51:57,366 INFO [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/info/96cff278830743e6a1bd622ac0d504f6, entries=1, sequenceid=70, filesize=5.9 K 2024-11-22T18:51:57,367 INFO [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1075, heapSize ~1.36 KB/1392, currentSize=0 B/0 for e10faa3d3c31514930ebfe5adc82aa17 in 27ms, sequenceid=70, compaction requested=true 2024-11-22T18:51:57,367 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732301480289.e10faa3d3c31514930ebfe5adc82aa17.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/info/e2b6bc41715d402c86142e2e3a32ed57, hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/info/7eeac4764d2a45748383b30aa015bd1e, hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/info/d5a752a2c67044b6a2d637f1cc90a09d, hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/info/98f17b0c5f984f0d8f945a7d89f93ef1, hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/info/eb652672f66744b98b161d4163284591, hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/info/0e56632448464a1eb2ff7c6a06dce65b] to archive 2024-11-22T18:51:57,368 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732301480289.e10faa3d3c31514930ebfe5adc82aa17.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-22T18:51:57,370 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732301480289.e10faa3d3c31514930ebfe5adc82aa17.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/info/e2b6bc41715d402c86142e2e3a32ed57 to hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/info/e2b6bc41715d402c86142e2e3a32ed57 2024-11-22T18:51:57,372 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/hbase/meta/1588230740/.tmp/info/5f14e4929d2b4303b037fbe41c75a66f is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1732301480289.e10faa3d3c31514930ebfe5adc82aa17./info:regioninfo/1732301480676/Put/seqid=0 2024-11-22T18:51:57,373 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732301480289.e10faa3d3c31514930ebfe5adc82aa17.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/info/7eeac4764d2a45748383b30aa015bd1e to hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/info/7eeac4764d2a45748383b30aa015bd1e 2024-11-22T18:51:57,373 WARN [Thread-1061 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741891_1077 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:57,374 WARN [Thread-1061 {}] hdfs.DataStreamer(1731): Error Recovery for BP-618561351-172.17.0.2-1732301478132:blk_1073741891_1077 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33799,DS-8585a096-2fa1-4304-9e66-228a52d53d0d,DISK], DatanodeInfoWithStorage[127.0.0.1:40553,DS-66ee2b9c-500a-4ee8-b156-fc240374cf40,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33799,DS-8585a096-2fa1-4304-9e66-228a52d53d0d,DISK]) is bad. 
2024-11-22T18:51:57,374 WARN [Thread-1061 {}] hdfs.DataStreamer(1850): Abandoning BP-618561351-172.17.0.2-1732301478132:blk_1073741891_1077 2024-11-22T18:51:57,375 WARN [Thread-1061 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33799,DS-8585a096-2fa1-4304-9e66-228a52d53d0d,DISK] 2024-11-22T18:51:57,375 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732301480289.e10faa3d3c31514930ebfe5adc82aa17.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/info/d5a752a2c67044b6a2d637f1cc90a09d to hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/info/d5a752a2c67044b6a2d637f1cc90a09d 2024-11-22T18:51:57,377 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732301480289.e10faa3d3c31514930ebfe5adc82aa17.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/info/98f17b0c5f984f0d8f945a7d89f93ef1 to hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/info/98f17b0c5f984f0d8f945a7d89f93ef1 2024-11-22T18:51:57,378 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732301480289.e10faa3d3c31514930ebfe5adc82aa17.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/info/eb652672f66744b98b161d4163284591 to hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/info/eb652672f66744b98b161d4163284591 2024-11-22T18:51:57,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45977 is added to blk_1073741892_1078 (size=7089) 2024-11-22T18:51:57,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40553 is added to blk_1073741892_1078 (size=7089) 2024-11-22T18:51:57,380 INFO [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/hbase/meta/1588230740/.tmp/info/5f14e4929d2b4303b037fbe41c75a66f 2024-11-22T18:51:57,380 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732301480289.e10faa3d3c31514930ebfe5adc82aa17.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/info/0e56632448464a1eb2ff7c6a06dce65b to hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/info/0e56632448464a1eb2ff7c6a06dce65b 
2024-11-22T18:51:57,380 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732301480289.e10faa3d3c31514930ebfe5adc82aa17.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=d79ba0c344fb:33031 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-11-22T18:51:57,381 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732301480289.e10faa3d3c31514930ebfe5adc82aa17.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [e2b6bc41715d402c86142e2e3a32ed57=10347, 7eeac4764d2a45748383b30aa015bd1e=12506, d5a752a2c67044b6a2d637f1cc90a09d=17994, 98f17b0c5f984f0d8f945a7d89f93ef1=6027, eb652672f66744b98b161d4163284591=6027, 0e56632448464a1eb2ff7c6a06dce65b=11421] 2024-11-22T18:51:57,385 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e10faa3d3c31514930ebfe5adc82aa17/recovered.edits/73.seqid, newMaxSeqId=73, maxSeqId=1 2024-11-22T18:51:57,385 INFO [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732301480289.e10faa3d3c31514930ebfe5adc82aa17. 2024-11-22T18:51:57,386 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for e10faa3d3c31514930ebfe5adc82aa17: Waiting for close lock at 1732301517339Running coprocessor pre-close hooks at 1732301517339Disabling compacts and flushes for region at 1732301517339Disabling writes for close at 1732301517340 (+1 ms)Obtaining lock to block concurrent updates at 1732301517340Preparing flush snapshotting stores in e10faa3d3c31514930ebfe5adc82aa17 at 1732301517340Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1732301480289.e10faa3d3c31514930ebfe5adc82aa17., syncing WAL and waiting on mvcc, flushsize=dataSize=1075, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1732301517341 (+1 ms)Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1732301480289.e10faa3d3c31514930ebfe5adc82aa17. at 1732301517342 (+1 ms)Flushing e10faa3d3c31514930ebfe5adc82aa17/info: creating writer at 1732301517342Flushing e10faa3d3c31514930ebfe5adc82aa17/info: appending metadata at 1732301517345 (+3 ms)Flushing e10faa3d3c31514930ebfe5adc82aa17/info: closing flushed file at 1732301517345Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@a01c5ab: reopening flushed file at 1732301517360 (+15 ms)Finished flush of dataSize ~1.05 KB/1075, heapSize ~1.36 KB/1392, currentSize=0 B/0 for e10faa3d3c31514930ebfe5adc82aa17 in 27ms, sequenceid=70, compaction requested=true at 1732301517367 (+7 ms)Writing region close event to WAL at 1732301517381 (+14 ms)Running coprocessor post-close hooks at 1732301517385 (+4 ms)Closed at 1732301517385 2024-11-22T18:51:57,386 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732301480289.e10faa3d3c31514930ebfe5adc82aa17. 
2024-11-22T18:51:57,399 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/hbase/meta/1588230740/.tmp/ns/adc751d9e81b458697de58d22b4f2546 is 43, key is default/ns:d/1732301480000/Put/seqid=0 2024-11-22T18:51:57,401 WARN [Thread-1068 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741893_1079 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:51:57,401 WARN [Thread-1068 {}] hdfs.DataStreamer(1731): Error Recovery for BP-618561351-172.17.0.2-1732301478132:blk_1073741893_1079 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33799,DS-8585a096-2fa1-4304-9e66-228a52d53d0d,DISK], DatanodeInfoWithStorage[127.0.0.1:40553,DS-66ee2b9c-500a-4ee8-b156-fc240374cf40,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33799,DS-8585a096-2fa1-4304-9e66-228a52d53d0d,DISK]) is bad. 
2024-11-22T18:51:57,401 WARN [Thread-1068 {}] hdfs.DataStreamer(1850): Abandoning BP-618561351-172.17.0.2-1732301478132:blk_1073741893_1079 2024-11-22T18:51:57,402 WARN [Thread-1068 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33799,DS-8585a096-2fa1-4304-9e66-228a52d53d0d,DISK] 2024-11-22T18:51:57,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45977 is added to blk_1073741894_1080 (size=5153) 2024-11-22T18:51:57,406 INFO [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/hbase/meta/1588230740/.tmp/ns/adc751d9e81b458697de58d22b4f2546 2024-11-22T18:51:57,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40553 is added to blk_1073741894_1080 (size=5153) 2024-11-22T18:51:57,426 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/hbase/meta/1588230740/.tmp/table/c20af76d89634a03920cfe5022bbb7b8 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1732301480691/Put/seqid=0 2024-11-22T18:51:57,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40553 is added to blk_1073741895_1081 (size=5424) 2024-11-22T18:51:57,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45977 is added to blk_1073741895_1081 (size=5424) 2024-11-22T18:51:57,431 INFO [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/hbase/meta/1588230740/.tmp/table/c20af76d89634a03920cfe5022bbb7b8 2024-11-22T18:51:57,437 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/hbase/meta/1588230740/.tmp/info/5f14e4929d2b4303b037fbe41c75a66f as hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/hbase/meta/1588230740/info/5f14e4929d2b4303b037fbe41c75a66f 2024-11-22T18:51:57,442 INFO [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/hbase/meta/1588230740/info/5f14e4929d2b4303b037fbe41c75a66f, entries=10, sequenceid=11, filesize=6.9 K 2024-11-22T18:51:57,443 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/hbase/meta/1588230740/.tmp/ns/adc751d9e81b458697de58d22b4f2546 as hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/hbase/meta/1588230740/ns/adc751d9e81b458697de58d22b4f2546 2024-11-22T18:51:57,448 INFO [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/hbase/meta/1588230740/ns/adc751d9e81b458697de58d22b4f2546, entries=2, sequenceid=11, filesize=5.0 K 2024-11-22T18:51:57,449 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/hbase/meta/1588230740/.tmp/table/c20af76d89634a03920cfe5022bbb7b8 as hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/hbase/meta/1588230740/table/c20af76d89634a03920cfe5022bbb7b8 2024-11-22T18:51:57,455 INFO [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/hbase/meta/1588230740/table/c20af76d89634a03920cfe5022bbb7b8, entries=2, sequenceid=11, filesize=5.3 K 2024-11-22T18:51:57,456 INFO [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 115ms, sequenceid=11, compaction requested=false 2024-11-22T18:51:57,460 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-22T18:51:57,461 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T18:51:57,461 INFO [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T18:51:57,461 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732301517340Running coprocessor pre-close hooks at 1732301517340Disabling compacts and flushes for region at 1732301517340Disabling writes for close at 1732301517341 (+1 ms)Obtaining lock to block concurrent updates at 1732301517341Preparing flush snapshotting stores in 1588230740 at 1732301517341Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1732301517341Flushing stores of hbase:meta,,1.1588230740 at 1732301517355 (+14 ms)Flushing 1588230740/info: creating writer at 1732301517355Flushing 1588230740/info: appending metadata at 1732301517371 (+16 ms)Flushing 1588230740/info: closing flushed file at 1732301517371Flushing 1588230740/ns: creating writer at 1732301517385 (+14 ms)Flushing 1588230740/ns: appending metadata at 1732301517399 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1732301517399Flushing 1588230740/table: creating writer at 1732301517411 (+12 ms)Flushing 1588230740/table: appending metadata at 1732301517425 (+14 ms)Flushing 1588230740/table: closing flushed file at 1732301517425Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@367d5ac4: reopening flushed file at 1732301517436 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@50bb2b65: 
reopening flushed file at 1732301517442 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@8ece7a4: reopening flushed file at 1732301517449 (+7 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 115ms, sequenceid=11, compaction requested=false at 1732301517456 (+7 ms)Writing region close event to WAL at 1732301517457 (+1 ms)Running coprocessor post-close hooks at 1732301517461 (+4 ms)Closed at 1732301517461 2024-11-22T18:51:57,461 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-22T18:51:57,541 INFO [RS:0;d79ba0c344fb:42437 {}] regionserver.HRegionServer(976): stopping server d79ba0c344fb,42437,1732301479035; all regions closed. 2024-11-22T18:51:57,541 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:57,541 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:57,541 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:57,541 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:57,542 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:51:57,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40553 is added to blk_1073741890_1075 (size=825) 2024-11-22T18:51:57,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45977 is added to blk_1073741890_1075 (size=825) 2024-11-22T18:51:58,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45977 is added to blk_1073741862_1045 (size=13591) 2024-11-22T18:51:58,240 INFO [regionserver/d79ba0c344fb:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-22T18:51:58,240 INFO [regionserver/d79ba0c344fb:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-22T18:51:58,241 INFO [regionserver/d79ba0c344fb:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T18:51:58,400 INFO [regionserver/d79ba0c344fb:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-22T18:51:58,401 INFO [regionserver/d79ba0c344fb:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-22T18:51:58,467 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4ff52e56[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45977, datanodeUuid=cac19669-cbaf-42c7-9aa6-abad3a8bfb0e, infoPort=38309, infoSecurePort=0, ipcPort=38035, storageInfo=lv=-57;cid=testClusterID;nsid=1373837700;c=1732301478132):Failed to transfer BP-618561351-172.17.0.2-1732301478132:blk_1073741836_1012 to 127.0.0.1:33799 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:51:58,467 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@365e639d[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45977, datanodeUuid=cac19669-cbaf-42c7-9aa6-abad3a8bfb0e, infoPort=38309, infoSecurePort=0, ipcPort=38035, storageInfo=lv=-57;cid=testClusterID;nsid=1373837700;c=1732301478132):Failed to transfer BP-618561351-172.17.0.2-1732301478132:blk_1073741832_1008 to 127.0.0.1:33799 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:51:59,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40553 is added to blk_1073741828_1004 (size=1189) 2024-11-22T18:51:59,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40553 is added to blk_1073741826_1002 (size=42) 2024-11-22T18:51:59,873 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-22T18:51:59,874 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T18:51:59,874 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-22T18:52:00,285 INFO [master/d79ba0c344fb:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-22T18:52:00,285 INFO [master/d79ba0c344fb:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-11-22T18:52:00,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40553 is added to blk_1073741827_1003 (size=196) 2024-11-22T18:52:00,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40553 is added to blk_1073741825_1001 (size=7) 2024-11-22T18:52:01,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45977 is added to blk_1073741838_1020 (size=3600) 2024-11-22T18:52:01,344 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 after 4002ms 2024-11-22T18:52:01,356 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta after 4002ms 2024-11-22T18:52:01,488 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@4ed1b73d {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-618561351-172.17.0.2-1732301478132:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:40677,null,null]) java.net.ConnectException: Call From d79ba0c344fb/172.17.0.2 to localhost:43815 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-22T18:52:02,342 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-22T18:52:02,344 DEBUG [RS:1;d79ba0c344fb:35983 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/oldWALs 2024-11-22T18:52:02,344 INFO [RS:1;d79ba0c344fb:35983 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog d79ba0c344fb%2C35983%2C1732301480111:(num 1732301480390) 2024-11-22T18:52:02,344 DEBUG [RS:1;d79ba0c344fb:35983 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T18:52:02,344 INFO [RS:1;d79ba0c344fb:35983 {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T18:52:02,345 INFO [RS:1;d79ba0c344fb:35983 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T18:52:02,345 INFO [RS:1;d79ba0c344fb:35983 {}] hbase.ChoreService(370): Chore service for: regionserver/d79ba0c344fb:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-22T18:52:02,345 INFO [RS:1;d79ba0c344fb:35983 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-22T18:52:02,345 INFO [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-22T18:52:02,345 INFO [RS:1;d79ba0c344fb:35983 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-22T18:52:02,345 INFO [RS:1;d79ba0c344fb:35983 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-22T18:52:02,345 INFO [RS:1;d79ba0c344fb:35983 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T18:52:02,345 INFO [RS:1;d79ba0c344fb:35983 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35983 2024-11-22T18:52:02,348 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35983-0x1014104c6c00002, quorum=127.0.0.1:59152, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/d79ba0c344fb,35983,1732301480111 2024-11-22T18:52:02,350 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:02,351 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33031-0x1014104c6c00000, quorum=127.0.0.1:59152, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T18:52:02,351 INFO [RS:1;d79ba0c344fb:35983 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T18:52:02,352 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [d79ba0c344fb,35983,1732301480111] 2024-11-22T18:52:02,355 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/d79ba0c344fb,35983,1732301480111 already deleted, retry=false 2024-11-22T18:52:02,355 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; d79ba0c344fb,35983,1732301480111 expired; onlineServers=1 2024-11-22T18:52:02,387 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:02,402 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:02,402 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:02,402 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:02,403 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:02,403 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:02,410 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:02,411 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:02,453 INFO [RS:1;d79ba0c344fb:35983 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T18:52:02,453 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35983-0x1014104c6c00002, quorum=127.0.0.1:59152, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T18:52:02,454 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35983-0x1014104c6c00002, quorum=127.0.0.1:59152, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T18:52:02,454 INFO [RS:1;d79ba0c344fb:35983 {}] regionserver.HRegionServer(1031): Exiting; stopping=d79ba0c344fb,35983,1732301480111; zookeeper connection closed. 2024-11-22T18:52:02,455 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4493cbf5 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4493cbf5 2024-11-22T18:52:02,542 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-22T18:52:02,546 DEBUG [RS:0;d79ba0c344fb:42437 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/oldWALs 2024-11-22T18:52:02,546 INFO [RS:0;d79ba0c344fb:42437 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog d79ba0c344fb%2C42437%2C1732301479035.meta:.meta(num 1732301517342) 2024-11-22T18:52:02,546 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:52:02,546 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:52:02,547 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:52:02,547 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:52:02,547 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:52:02,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45977 is added to blk_1073741881_1065 (size=18156) 2024-11-22T18:52:02,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40553 is added to blk_1073741881_1065 (size=18156) 2024-11-22T18:52:02,552 DEBUG [RS:0;d79ba0c344fb:42437 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/oldWALs 2024-11-22T18:52:02,552 INFO [RS:0;d79ba0c344fb:42437 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog d79ba0c344fb%2C42437%2C1732301479035:(num 1732301516698) 2024-11-22T18:52:02,552 DEBUG [RS:0;d79ba0c344fb:42437 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T18:52:02,552 INFO [RS:0;d79ba0c344fb:42437 {}] 
regionserver.LeaseManager(133): Closed leases 2024-11-22T18:52:02,552 INFO [RS:0;d79ba0c344fb:42437 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T18:52:02,552 INFO [RS:0;d79ba0c344fb:42437 {}] hbase.ChoreService(370): Chore service for: regionserver/d79ba0c344fb:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-22T18:52:02,552 INFO [RS:0;d79ba0c344fb:42437 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T18:52:02,552 INFO [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-22T18:52:02,552 INFO [RS:0;d79ba0c344fb:42437 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42437 2024-11-22T18:52:02,554 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42437-0x1014104c6c00001, quorum=127.0.0.1:59152, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/d79ba0c344fb,42437,1732301479035 2024-11-22T18:52:02,554 INFO [RS:0;d79ba0c344fb:42437 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T18:52:02,555 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33031-0x1014104c6c00000, quorum=127.0.0.1:59152, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T18:52:02,556 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [d79ba0c344fb,42437,1732301479035] 2024-11-22T18:52:02,557 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/d79ba0c344fb,42437,1732301479035 already deleted, retry=false 2024-11-22T18:52:02,557 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; d79ba0c344fb,42437,1732301479035 expired; onlineServers=0 2024-11-22T18:52:02,557 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'd79ba0c344fb,33031,1732301478977' ***** 2024-11-22T18:52:02,557 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-22T18:52:02,558 INFO [M:0;d79ba0c344fb:33031 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T18:52:02,558 INFO [M:0;d79ba0c344fb:33031 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T18:52:02,558 DEBUG [M:0;d79ba0c344fb:33031 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-22T18:52:02,558 DEBUG [M:0;d79ba0c344fb:33031 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-22T18:52:02,558 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-22T18:52:02,558 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster-HFileCleaner.large.0-1732301479256 {}] cleaner.HFileCleaner(306): Exit Thread[master/d79ba0c344fb:0:becomeActiveMaster-HFileCleaner.large.0-1732301479256,5,FailOnTimeoutGroup] 2024-11-22T18:52:02,558 INFO [M:0;d79ba0c344fb:33031 {}] hbase.ChoreService(370): Chore service for: master/d79ba0c344fb:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-22T18:52:02,558 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster-HFileCleaner.small.0-1732301479256 {}] cleaner.HFileCleaner(306): Exit Thread[master/d79ba0c344fb:0:becomeActiveMaster-HFileCleaner.small.0-1732301479256,5,FailOnTimeoutGroup] 2024-11-22T18:52:02,558 INFO [M:0;d79ba0c344fb:33031 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T18:52:02,558 DEBUG [M:0;d79ba0c344fb:33031 {}] master.HMaster(1795): Stopping service threads 2024-11-22T18:52:02,558 INFO [M:0;d79ba0c344fb:33031 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-22T18:52:02,559 INFO [M:0;d79ba0c344fb:33031 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T18:52:02,559 INFO [M:0;d79ba0c344fb:33031 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-22T18:52:02,559 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-22T18:52:02,561 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33031-0x1014104c6c00000, quorum=127.0.0.1:59152, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-22T18:52:02,561 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33031-0x1014104c6c00000, quorum=127.0.0.1:59152, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:52:02,561 DEBUG [M:0;d79ba0c344fb:33031 {}] zookeeper.ZKUtil(347): master:33031-0x1014104c6c00000, quorum=127.0.0.1:59152, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-22T18:52:02,561 WARN [M:0;d79ba0c344fb:33031 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-22T18:52:02,562 INFO [M:0;d79ba0c344fb:33031 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/.lastflushedseqids 2024-11-22T18:52:02,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40553 is added to blk_1073741896_1082 (size=130) 2024-11-22T18:52:02,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45977 is added to blk_1073741896_1082 (size=130) 2024-11-22T18:52:02,569 INFO [M:0;d79ba0c344fb:33031 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-22T18:52:02,569 INFO [M:0;d79ba0c344fb:33031 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-22T18:52:02,569 DEBUG [M:0;d79ba0c344fb:33031 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T18:52:02,569 INFO [M:0;d79ba0c344fb:33031 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T18:52:02,569 DEBUG [M:0;d79ba0c344fb:33031 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T18:52:02,569 DEBUG [M:0;d79ba0c344fb:33031 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T18:52:02,569 DEBUG [M:0;d79ba0c344fb:33031 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T18:52:02,569 INFO [M:0;d79ba0c344fb:33031 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.26 KB heapSize=29.50 KB 2024-11-22T18:52:02,588 DEBUG [M:0;d79ba0c344fb:33031 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6c35ad4e62ab4d5596fbc276cb799c2a is 82, key is hbase:meta,,1/info:regioninfo/1732301479976/Put/seqid=0 2024-11-22T18:52:02,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45977 is added to blk_1073741897_1083 (size=5672) 2024-11-22T18:52:02,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40553 is added to blk_1073741897_1083 (size=5672) 2024-11-22T18:52:02,597 INFO [M:0;d79ba0c344fb:33031 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6c35ad4e62ab4d5596fbc276cb799c2a 2024-11-22T18:52:02,618 DEBUG [M:0;d79ba0c344fb:33031 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9e337bb4fe794daf9873254bf6a9201b is 775, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732301480697/Put/seqid=0 2024-11-22T18:52:02,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45977 is added to blk_1073741898_1084 (size=6256) 2024-11-22T18:52:02,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40553 is added to blk_1073741898_1084 (size=6256) 2024-11-22T18:52:02,624 INFO [M:0;d79ba0c344fb:33031 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.59 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9e337bb4fe794daf9873254bf6a9201b 2024-11-22T18:52:02,630 INFO [M:0;d79ba0c344fb:33031 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 9e337bb4fe794daf9873254bf6a9201b 2024-11-22T18:52:02,648 DEBUG [M:0;d79ba0c344fb:33031 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f0c8443ab1854829a3dd3e05df53bb31 is 69, key is d79ba0c344fb,35983,1732301480111/rs:state/1732301480192/Put/seqid=0 2024-11-22T18:52:02,656 
DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42437-0x1014104c6c00001, quorum=127.0.0.1:59152, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T18:52:02,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42437-0x1014104c6c00001, quorum=127.0.0.1:59152, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T18:52:02,656 INFO [RS:0;d79ba0c344fb:42437 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T18:52:02,657 INFO [RS:0;d79ba0c344fb:42437 {}] regionserver.HRegionServer(1031): Exiting; stopping=d79ba0c344fb,42437,1732301479035; zookeeper connection closed. 2024-11-22T18:52:02,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40553 is added to blk_1073741899_1085 (size=5224) 2024-11-22T18:52:02,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45977 is added to blk_1073741899_1085 (size=5224) 2024-11-22T18:52:02,658 INFO [M:0;d79ba0c344fb:33031 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f0c8443ab1854829a3dd3e05df53bb31 2024-11-22T18:52:02,658 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@b29dc54 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@b29dc54 2024-11-22T18:52:02,658 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-22T18:52:02,686 DEBUG [M:0;d79ba0c344fb:33031 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/80bb4276616e4e519587d33f23aedb67 is 52, key is load_balancer_on/state:d/1732301480085/Put/seqid=0 2024-11-22T18:52:02,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45977 is added to blk_1073741900_1086 (size=5056) 2024-11-22T18:52:02,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40553 is added to blk_1073741900_1086 (size=5056) 2024-11-22T18:52:02,712 INFO [M:0;d79ba0c344fb:33031 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/80bb4276616e4e519587d33f23aedb67 2024-11-22T18:52:02,718 DEBUG [M:0;d79ba0c344fb:33031 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6c35ad4e62ab4d5596fbc276cb799c2a as hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/6c35ad4e62ab4d5596fbc276cb799c2a 2024-11-22T18:52:02,725 INFO [M:0;d79ba0c344fb:33031 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/6c35ad4e62ab4d5596fbc276cb799c2a, entries=8, sequenceid=60, filesize=5.5 K 2024-11-22T18:52:02,726 DEBUG [M:0;d79ba0c344fb:33031 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9e337bb4fe794daf9873254bf6a9201b as hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9e337bb4fe794daf9873254bf6a9201b 2024-11-22T18:52:02,734 INFO [M:0;d79ba0c344fb:33031 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 9e337bb4fe794daf9873254bf6a9201b 2024-11-22T18:52:02,734 INFO [M:0;d79ba0c344fb:33031 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9e337bb4fe794daf9873254bf6a9201b, entries=6, sequenceid=60, filesize=6.1 K 2024-11-22T18:52:02,740 DEBUG [M:0;d79ba0c344fb:33031 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f0c8443ab1854829a3dd3e05df53bb31 as hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f0c8443ab1854829a3dd3e05df53bb31 2024-11-22T18:52:02,747 INFO [M:0;d79ba0c344fb:33031 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f0c8443ab1854829a3dd3e05df53bb31, entries=2, sequenceid=60, filesize=5.1 K 2024-11-22T18:52:02,748 DEBUG [M:0;d79ba0c344fb:33031 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/80bb4276616e4e519587d33f23aedb67 as hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/80bb4276616e4e519587d33f23aedb67 2024-11-22T18:52:02,755 INFO [M:0;d79ba0c344fb:33031 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/80bb4276616e4e519587d33f23aedb67, entries=1, sequenceid=60, filesize=4.9 K 2024-11-22T18:52:02,756 INFO [M:0;d79ba0c344fb:33031 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.26 KB/23817, heapSize ~29.44 KB/30144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 187ms, sequenceid=60, compaction requested=false 2024-11-22T18:52:02,758 INFO [M:0;d79ba0c344fb:33031 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
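The flush entries above follow a two-phase pattern: each column family's flush output is first written under the store's .tmp directory and then "Committed" by moving it into the family directory (info, proc, rs, state). Purely as an illustrative sketch of that write-to-temp-then-rename pattern on a Hadoop FileSystem (not the actual HRegionFileSystem code; the paths and helper name here are made up for the example):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenCommitExample {
    // Hypothetical helper: write bytes to <store>/.tmp/<name>, then move the
    // finished file into the family directory, mirroring the
    // "Committing .../.tmp/... as .../<family>/..." lines in the log above.
    static Path writeAndCommit(FileSystem fs, Path storeDir, String family,
                               String fileName, byte[] payload) throws java.io.IOException {
        Path tmpFile = new Path(new Path(storeDir, ".tmp"), fileName);
        try (FSDataOutputStream out = fs.create(tmpFile, true)) {
            out.write(payload);                 // flushed data lands in .tmp first
        }
        Path committed = new Path(new Path(storeDir, family), fileName);
        fs.mkdirs(committed.getParent());
        if (!fs.rename(tmpFile, committed)) {   // commit = rename into place
            throw new java.io.IOException("Commit failed for " + tmpFile);
        }
        return committed;
    }

    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.getLocal(new Configuration());
        Path store = new Path("/tmp/example-store");   // placeholder path
        writeAndCommit(fs, store, "info", "example-hfile", new byte[] {1, 2, 3});
    }
}
```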
2024-11-22T18:52:02,758 DEBUG [M:0;d79ba0c344fb:33031 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
    Waiting for close lock at 1732301522569
    Disabling compacts and flushes for region at 1732301522569
    Disabling writes for close at 1732301522569
    Obtaining lock to block concurrent updates at 1732301522569
    Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732301522569
    Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23817, getHeapSize=30144, getOffHeapSize=0, getCellsCount=71 at 1732301522570 (+1 ms)
    Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732301522570
    Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732301522570
    Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732301522588 (+18 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732301522588
    Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732301522603 (+15 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732301522617 (+14 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732301522618 (+1 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732301522630 (+12 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732301522647 (+17 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732301522647
    Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732301522664 (+17 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732301522685 (+21 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732301522685
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@666acd8c: reopening flushed file at 1732301522717 (+32 ms)
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@61e39408: reopening flushed file at 1732301522725 (+8 ms)
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@476c6aee: reopening flushed file at 1732301522735 (+10 ms)
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1688898e: reopening flushed file at 1732301522747 (+12 ms)
    Finished flush of dataSize ~23.26 KB/23817, heapSize ~29.44 KB/30144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 187ms, sequenceid=60, compaction requested=false at 1732301522756 (+9 ms)
    Writing region close event to WAL at 1732301522758 (+2 ms)
    Closed at 1732301522758
2024-11-22T18:52:02,759 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T18:52:02,759 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T18:52:02,760 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T18:52:02,760 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T18:52:02,760 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T18:52:02,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40553 is added to blk_1073741880_1063 (size=1045)
2024-11-22T18:52:02,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45977 is added to blk_1073741880_1063 (size=1045)
2024-11-22T18:52:02,766 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
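The close journal above is a step-by-step trace of the region close: each entry names a phase and the wall-clock time it started, with the delta from the previous phase in parentheses. A minimal, purely illustrative way to produce that kind of journal (not HBase's MonitoredTask machinery; class and method names here are invented) is to record labeled timestamps and print the deltas:

```java
import java.util.ArrayList;
import java.util.List;

public class StepJournal {
    private record Step(String label, long millis) {}
    private final List<Step> steps = new ArrayList<>();

    // Record a named phase with the current wall-clock time.
    void mark(String label) {
        steps.add(new Step(label, System.currentTimeMillis()));
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        long prev = -1;
        for (Step s : steps) {
            sb.append(s.label()).append(" at ").append(s.millis());
            if (prev >= 0 && s.millis() > prev) {
                sb.append(" (+").append(s.millis() - prev).append(" ms)");
            }
            sb.append('\n');
            prev = s.millis();
        }
        return sb.toString();
    }

    public static void main(String[] args) throws InterruptedException {
        StepJournal journal = new StepJournal();
        journal.mark("Waiting for close lock");
        journal.mark("Disabling writes for close");
        Thread.sleep(20);                  // simulate work between phases
        journal.mark("Flushing stores");
        System.out.print(journal);
    }
}
```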
2024-11-22T18:52:02,767 INFO [M:0;d79ba0c344fb:33031 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-22T18:52:02,767 INFO [M:0;d79ba0c344fb:33031 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33031 2024-11-22T18:52:02,767 INFO [M:0;d79ba0c344fb:33031 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T18:52:02,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33031-0x1014104c6c00000, quorum=127.0.0.1:59152, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T18:52:02,869 INFO [M:0;d79ba0c344fb:33031 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T18:52:02,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33031-0x1014104c6c00000, quorum=127.0.0.1:59152, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T18:52:02,876 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3740407e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T18:52:02,876 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2e234cf7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T18:52:02,876 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T18:52:02,877 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@568b1686{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T18:52:02,877 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7d04364e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/hadoop.log.dir/,STOPPED} 2024-11-22T18:52:02,878 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@cde00ca {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-618561351-172.17.0.2-1732301478132:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:40677,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:43815 , LocalHost:localPort d79ba0c344fb/172.17.0.2:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more 2024-11-22T18:52:02,880 WARN [BP-618561351-172.17.0.2-1732301478132 heartbeating to localhost/127.0.0.1:44035 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-618561351-172.17.0.2-1732301478132 (Datanode Uuid cac19669-cbaf-42c7-9aa6-abad3a8bfb0e) service to localhost/127.0.0.1:44035 2024-11-22T18:52:02,881 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/data/data3/current/BP-618561351-172.17.0.2-1732301478132 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T18:52:02,881 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/data/data4/current/BP-618561351-172.17.0.2-1732301478132 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T18:52:02,881 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T18:52:02,882 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@cde00ca {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-618561351-172.17.0.2-1732301478132:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:40677,null,null]) java.io.IOException: No block pool offer service for bpid=BP-618561351-172.17.0.2-1732301478132 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:52:02,882 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@cde00ca {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-618561351-172.17.0.2-1732301478132:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:45977,null,null]) java.io.IOException: No block pool offer service for bpid=BP-618561351-172.17.0.2-1732301478132 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T18:52:02,882 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@cde00ca {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-618561351-172.17.0.2-1732301478132:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:40677,null,null], DatanodeInfoWithStorage[127.0.0.1:45977,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-618561351-172.17.0.2-1732301478132:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:40677,null,null], DatanodeInfoWithStorage[127.0.0.1:45977,null,null]] 2024-11-22T18:52:02,886 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@30add41a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T18:52:02,886 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3fd17220{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T18:52:02,886 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T18:52:02,886 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1dc59954{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T18:52:02,887 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@50ff2063{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/hadoop.log.dir/,STOPPED} 2024-11-22T18:52:02,888 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
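The block-recovery failures above come down to two things: the IPC client's retry policy (RetryUpToMaximumCountWithFixedSleep, 10 retries with a 1 s sleep) and the fact that the retry sleep is interrupted because the datanodes are shutting down, which surfaces as an InterruptedIOException and a FAILED recovery. As a simplified, self-contained sketch of that behavior (not Hadoop's Client or RetryPolicy code; the helper name is invented), a bounded fixed-sleep retry that aborts on interrupt looks roughly like this:

```java
import java.io.InterruptedIOException;
import java.util.concurrent.Callable;

public class FixedSleepRetry {
    // Hypothetical helper: retry 'call' up to maxRetries times, sleeping a fixed
    // interval between attempts. If the sleeping thread is interrupted (e.g. the
    // process is shutting down), give up immediately with InterruptedIOException,
    // which is what the block-recovery RPC in the log above runs into.
    static <T> T callWithRetries(Callable<T> call, int maxRetries, long sleepMillis)
            throws Exception {
        Exception last = null;
        for (int attempt = 0; attempt <= maxRetries; attempt++) {
            try {
                return call.call();
            } catch (Exception e) {
                last = e;
                if (attempt == maxRetries) {
                    break;                       // retries exhausted
                }
                try {
                    Thread.sleep(sleepMillis);   // fixed back-off between attempts
                } catch (InterruptedException ie) {
                    Thread.currentThread().interrupt();
                    InterruptedIOException iioe =
                        new InterruptedIOException("Interrupted while waiting to retry");
                    iioe.initCause(ie);
                    throw iioe;                  // abort the whole operation
                }
            }
        }
        throw last;
    }

    public static void main(String[] args) throws Exception {
        // Always-failing call, so the loop exercises the retry/sleep path.
        Callable<String> failing = () -> { throw new java.io.IOException("connection refused"); };
        try {
            callWithRetries(failing, 3, 100);
        } catch (Exception e) {
            System.out.println("gave up: " + e);
        }
    }
}
```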
2024-11-22T18:52:02,888 WARN [BP-618561351-172.17.0.2-1732301478132 heartbeating to localhost/127.0.0.1:44035 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T18:52:02,888 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T18:52:02,888 WARN [BP-618561351-172.17.0.2-1732301478132 heartbeating to localhost/127.0.0.1:44035 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-618561351-172.17.0.2-1732301478132 (Datanode Uuid 21bac189-52c3-492e-8931-114bf278e511) service to localhost/127.0.0.1:44035 2024-11-22T18:52:02,889 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/data/data9/current/BP-618561351-172.17.0.2-1732301478132 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T18:52:02,889 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/cluster_4f21cbac-5e31-785f-1da7-87f1a9c3aa50/data/data10/current/BP-618561351-172.17.0.2-1732301478132 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T18:52:02,890 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T18:52:02,897 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@62b96b7c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T18:52:02,897 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2fd186ec{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T18:52:02,898 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T18:52:02,898 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2305029e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T18:52:02,898 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@42b52d44{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/hadoop.log.dir/,STOPPED} 2024-11-22T18:52:02,914 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-22T18:52:02,914 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-22T18:52:02,929 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:02,929 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:02,929 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:02,930 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:02,931 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:02,931 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:02,937 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:02,937 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:02,937 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:02,941 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:02,962 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-22T18:52:02,974 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=155 (was 78) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:38965 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44035 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44035 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:38965 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44035 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Command processor java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-18-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007fab40bf5b70.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44035 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:44035 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44035 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007fab40bf5b70.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44035 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44035 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44035 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:44035 java.base@17.0.11/java.lang.Thread.sleep(Native 
Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:44035 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=454 (was 404) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=141 (was 141), ProcessCount=11 (was 11), AvailableMemoryMB=7632 (was 8024) 2024-11-22T18:52:02,988 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=155, OpenFileDescriptor=454, MaxFileDescriptor=1048576, SystemLoadAverage=141, ProcessCount=11, AvailableMemoryMB=7629 2024-11-22T18:52:02,989 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-22T18:52:02,989 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/hadoop.log.dir so I do NOT create it in target/test-data/70cded03-1906-38ee-9529-3992d66f34e6 2024-11-22T18:52:02,989 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4359b79-a465-d70b-b5a8-fb2671b77157/hadoop.tmp.dir so I do NOT create it in target/test-data/70cded03-1906-38ee-9529-3992d66f34e6 2024-11-22T18:52:02,989 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/cluster_6f545e2e-bed3-1cd6-45bc-f784bad987a4, deleteOnExit=true 2024-11-22T18:52:02,989 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-22T18:52:02,990 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/test.cache.data in system properties and HBase conf 2024-11-22T18:52:02,990 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/hadoop.tmp.dir in system properties and HBase conf 2024-11-22T18:52:02,990 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/hadoop.log.dir in system properties and HBase conf 2024-11-22T18:52:02,990 INFO [Time-limited test 
{}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-22T18:52:02,990 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-22T18:52:02,990 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-22T18:52:02,990 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-22T18:52:02,991 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-22T18:52:02,991 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-22T18:52:02,991 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-22T18:52:02,991 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T18:52:02,991 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-22T18:52:02,992 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-22T18:52:02,992 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T18:52:02,992 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T18:52:02,992 INFO [Time-limited 
test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-22T18:52:02,992 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/nfs.dump.dir in system properties and HBase conf 2024-11-22T18:52:02,992 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/java.io.tmpdir in system properties and HBase conf 2024-11-22T18:52:02,992 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T18:52:02,992 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-22T18:52:02,993 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-22T18:52:03,011 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T18:52:03,093 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T18:52:03,097 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T18:52:03,100 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T18:52:03,100 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T18:52:03,100 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T18:52:03,101 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T18:52:03,102 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@13c2425d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/hadoop.log.dir/,AVAILABLE} 2024-11-22T18:52:03,102 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2ddfefdc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T18:52:03,238 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4702e786{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/java.io.tmpdir/jetty-localhost-37645-hadoop-hdfs-3_4_1-tests_jar-_-any-1726471010083684574/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T18:52:03,239 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@38a1581{HTTP/1.1, (http/1.1)}{localhost:37645} 2024-11-22T18:52:03,239 INFO [Time-limited test {}] server.Server(415): Started @148628ms 2024-11-22T18:52:03,255 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T18:52:03,338 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T18:52:03,343 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T18:52:03,345 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T18:52:03,345 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T18:52:03,345 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T18:52:03,346 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3a93e8eb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/hadoop.log.dir/,AVAILABLE} 2024-11-22T18:52:03,346 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2b6caabd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T18:52:03,351 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] 
... 11 more 2024-11-22T18:52:03,357 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:03,463 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@371f8296{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/java.io.tmpdir/jetty-localhost-41153-hadoop-hdfs-3_4_1-tests_jar-_-any-10550452275617058474/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T18:52:03,464 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5e97eedf{HTTP/1.1, (http/1.1)}{localhost:41153} 2024-11-22T18:52:03,464 INFO [Time-limited test {}] server.Server(415): Started @148853ms 2024-11-22T18:52:03,466 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T18:52:03,497 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T18:52:03,502 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T18:52:03,503 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T18:52:03,503 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T18:52:03,503 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T18:52:03,505 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4bc294e4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/hadoop.log.dir/,AVAILABLE} 2024-11-22T18:52:03,505 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7ec5b9af{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T18:52:03,574 WARN [Thread-1195 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/cluster_6f545e2e-bed3-1cd6-45bc-f784bad987a4/data/data1/current/BP-1703702927-172.17.0.2-1732301523030/current, will proceed with Du for space computation calculation, 2024-11-22T18:52:03,574 WARN [Thread-1196 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/cluster_6f545e2e-bed3-1cd6-45bc-f784bad987a4/data/data2/current/BP-1703702927-172.17.0.2-1732301523030/current, will proceed with Du for space computation calculation, 2024-11-22T18:52:03,598 WARN [Thread-1174 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T18:52:03,600 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x18787bd16e6ca59c with lease ID 0x94f2c80c13fc39bf: Processing first storage report for DS-647e0fc4-b3cf-4111-b483-31aada013bb6 from datanode DatanodeRegistration(127.0.0.1:45781, datanodeUuid=6573a035-60e5-4acc-9a47-16eae730f01f, infoPort=40621, infoSecurePort=0, ipcPort=40773, storageInfo=lv=-57;cid=testClusterID;nsid=859048353;c=1732301523030) 2024-11-22T18:52:03,600 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x18787bd16e6ca59c with lease ID 0x94f2c80c13fc39bf: from storage DS-647e0fc4-b3cf-4111-b483-31aada013bb6 node DatanodeRegistration(127.0.0.1:45781, datanodeUuid=6573a035-60e5-4acc-9a47-16eae730f01f, infoPort=40621, infoSecurePort=0, ipcPort=40773, storageInfo=lv=-57;cid=testClusterID;nsid=859048353;c=1732301523030), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T18:52:03,600 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x18787bd16e6ca59c with lease ID 0x94f2c80c13fc39bf: Processing first storage report for DS-748a9062-e5ac-496d-b731-eb20b79fe691 from datanode DatanodeRegistration(127.0.0.1:45781, datanodeUuid=6573a035-60e5-4acc-9a47-16eae730f01f, infoPort=40621, infoSecurePort=0, ipcPort=40773, storageInfo=lv=-57;cid=testClusterID;nsid=859048353;c=1732301523030) 2024-11-22T18:52:03,600 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x18787bd16e6ca59c with lease ID 0x94f2c80c13fc39bf: from storage DS-748a9062-e5ac-496d-b731-eb20b79fe691 node DatanodeRegistration(127.0.0.1:45781, datanodeUuid=6573a035-60e5-4acc-9a47-16eae730f01f, infoPort=40621, infoSecurePort=0, ipcPort=40773, storageInfo=lv=-57;cid=testClusterID;nsid=859048353;c=1732301523030), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T18:52:03,624 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@341f9f9e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/java.io.tmpdir/jetty-localhost-42065-hadoop-hdfs-3_4_1-tests_jar-_-any-14642046550665744661/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T18:52:03,624 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7f78fbde{HTTP/1.1, (http/1.1)}{localhost:42065} 2024-11-22T18:52:03,624 INFO [Time-limited test {}] server.Server(415): Started @149013ms 2024-11-22T18:52:03,626 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-22T18:52:03,734 WARN [Thread-1221 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/cluster_6f545e2e-bed3-1cd6-45bc-f784bad987a4/data/data3/current/BP-1703702927-172.17.0.2-1732301523030/current, will proceed with Du for space computation calculation, 2024-11-22T18:52:03,734 WARN [Thread-1222 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/cluster_6f545e2e-bed3-1cd6-45bc-f784bad987a4/data/data4/current/BP-1703702927-172.17.0.2-1732301523030/current, will proceed with Du for space computation calculation, 2024-11-22T18:52:03,758 WARN [Thread-1210 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-22T18:52:03,760 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8f94b00ad265ba43 with lease ID 0x94f2c80c13fc39c0: Processing first storage report for DS-a605f1e1-91c5-4bce-9b24-67adf67db55e from datanode DatanodeRegistration(127.0.0.1:35599, datanodeUuid=c0a494c7-167c-424b-9e54-5b31acf57666, infoPort=40963, infoSecurePort=0, ipcPort=44101, storageInfo=lv=-57;cid=testClusterID;nsid=859048353;c=1732301523030) 2024-11-22T18:52:03,760 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8f94b00ad265ba43 with lease ID 0x94f2c80c13fc39c0: from storage DS-a605f1e1-91c5-4bce-9b24-67adf67db55e node DatanodeRegistration(127.0.0.1:35599, datanodeUuid=c0a494c7-167c-424b-9e54-5b31acf57666, infoPort=40963, infoSecurePort=0, ipcPort=44101, storageInfo=lv=-57;cid=testClusterID;nsid=859048353;c=1732301523030), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T18:52:03,760 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8f94b00ad265ba43 with lease ID 0x94f2c80c13fc39c0: Processing first storage report for DS-9a3de42e-922d-4c33-bef1-9c8e20d3577b from datanode DatanodeRegistration(127.0.0.1:35599, datanodeUuid=c0a494c7-167c-424b-9e54-5b31acf57666, infoPort=40963, infoSecurePort=0, ipcPort=44101, storageInfo=lv=-57;cid=testClusterID;nsid=859048353;c=1732301523030) 2024-11-22T18:52:03,760 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8f94b00ad265ba43 with lease ID 0x94f2c80c13fc39c0: from storage DS-9a3de42e-922d-4c33-bef1-9c8e20d3577b node DatanodeRegistration(127.0.0.1:35599, datanodeUuid=c0a494c7-167c-424b-9e54-5b31acf57666, infoPort=40963, infoSecurePort=0, ipcPort=44101, storageInfo=lv=-57;cid=testClusterID;nsid=859048353;c=1732301523030), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T18:52:03,850 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6 2024-11-22T18:52:03,853 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/cluster_6f545e2e-bed3-1cd6-45bc-f784bad987a4/zookeeper_0, clientPort=51025, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/cluster_6f545e2e-bed3-1cd6-45bc-f784bad987a4/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/cluster_6f545e2e-bed3-1cd6-45bc-f784bad987a4/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-22T18:52:03,854 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=51025 2024-11-22T18:52:03,854 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T18:52:03,855 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T18:52:03,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35599 is added to blk_1073741825_1001 (size=7) 2024-11-22T18:52:03,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45781 is added to blk_1073741825_1001 (size=7) 2024-11-22T18:52:03,865 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99 with version=8 2024-11-22T18:52:03,865 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/hbase-staging 2024-11-22T18:52:03,868 INFO [Time-limited test {}] client.ConnectionUtils(128): master/d79ba0c344fb:0 server-side Connection retries=45 2024-11-22T18:52:03,868 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T18:52:03,868 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T18:52:03,868 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T18:52:03,868 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T18:52:03,868 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T18:52:03,868 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-22T18:52:03,868 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T18:52:03,869 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:32959 2024-11-22T18:52:03,870 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:32959 connecting to ZooKeeper ensemble=127.0.0.1:51025 2024-11-22T18:52:03,876 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:329590x0, quorum=127.0.0.1:51025, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T18:52:03,876 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:32959-0x1014105761c0000 connected 2024-11-22T18:52:03,893 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T18:52:03,895 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T18:52:03,897 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:32959-0x1014105761c0000, quorum=127.0.0.1:51025, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T18:52:03,897 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99, hbase.cluster.distributed=false 2024-11-22T18:52:03,898 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:32959-0x1014105761c0000, quorum=127.0.0.1:51025, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T18:52:03,898 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=32959 2024-11-22T18:52:03,898 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=32959 2024-11-22T18:52:03,899 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=32959 2024-11-22T18:52:03,899 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=32959 2024-11-22T18:52:03,899 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=32959 2024-11-22T18:52:03,915 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/d79ba0c344fb:0 server-side Connection retries=45 2024-11-22T18:52:03,915 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T18:52:03,915 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T18:52:03,915 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T18:52:03,915 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T18:52:03,915 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T18:52:03,915 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-22T18:52:03,915 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T18:52:03,916 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38379 2024-11-22T18:52:03,917 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38379 connecting to ZooKeeper ensemble=127.0.0.1:51025 2024-11-22T18:52:03,918 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T18:52:03,920 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T18:52:03,924 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:383790x0, quorum=127.0.0.1:51025, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T18:52:03,924 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:383790x0, quorum=127.0.0.1:51025, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T18:52:03,924 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38379-0x1014105761c0001 connected 2024-11-22T18:52:03,924 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-22T18:52:03,925 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-22T18:52:03,925 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38379-0x1014105761c0001, quorum=127.0.0.1:51025, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-22T18:52:03,926 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38379-0x1014105761c0001, quorum=127.0.0.1:51025, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T18:52:03,926 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38379 2024-11-22T18:52:03,927 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38379 2024-11-22T18:52:03,927 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38379 2024-11-22T18:52:03,927 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38379 2024-11-22T18:52:03,927 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38379 2024-11-22T18:52:03,939 
DEBUG [M:0;d79ba0c344fb:32959 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;d79ba0c344fb:32959 2024-11-22T18:52:03,939 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/d79ba0c344fb,32959,1732301523867 2024-11-22T18:52:03,941 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32959-0x1014105761c0000, quorum=127.0.0.1:51025, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T18:52:03,941 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38379-0x1014105761c0001, quorum=127.0.0.1:51025, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T18:52:03,941 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:32959-0x1014105761c0000, quorum=127.0.0.1:51025, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/d79ba0c344fb,32959,1732301523867 2024-11-22T18:52:03,944 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38379-0x1014105761c0001, quorum=127.0.0.1:51025, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-22T18:52:03,945 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32959-0x1014105761c0000, quorum=127.0.0.1:51025, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:52:03,945 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38379-0x1014105761c0001, quorum=127.0.0.1:51025, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:52:03,945 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:32959-0x1014105761c0000, quorum=127.0.0.1:51025, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-22T18:52:03,945 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/d79ba0c344fb,32959,1732301523867 from backup master directory 2024-11-22T18:52:03,947 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32959-0x1014105761c0000, quorum=127.0.0.1:51025, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/d79ba0c344fb,32959,1732301523867 2024-11-22T18:52:03,947 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38379-0x1014105761c0001, quorum=127.0.0.1:51025, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T18:52:03,947 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32959-0x1014105761c0000, quorum=127.0.0.1:51025, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T18:52:03,947 WARN [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-22T18:52:03,947 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=d79ba0c344fb,32959,1732301523867 2024-11-22T18:52:03,952 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/hbase.id] with ID: 431788dc-5038-4a39-a809-bc639c875854 2024-11-22T18:52:03,952 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/.tmp/hbase.id 2024-11-22T18:52:03,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45781 is added to blk_1073741826_1002 (size=42) 2024-11-22T18:52:03,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35599 is added to blk_1073741826_1002 (size=42) 2024-11-22T18:52:03,958 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/.tmp/hbase.id]:[hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/hbase.id] 2024-11-22T18:52:03,970 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T18:52:03,970 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-22T18:52:03,972 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-11-22T18:52:03,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38379-0x1014105761c0001, quorum=127.0.0.1:51025, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:52:03,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32959-0x1014105761c0000, quorum=127.0.0.1:51025, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:52:03,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35599 is added to blk_1073741827_1003 (size=196) 2024-11-22T18:52:03,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45781 is added to blk_1073741827_1003 (size=196) 2024-11-22T18:52:03,981 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T18:52:03,982 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-22T18:52:03,983 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T18:52:03,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45781 is added to blk_1073741828_1004 (size=1189) 2024-11-22T18:52:03,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35599 is added to blk_1073741828_1004 (size=1189) 2024-11-22T18:52:03,993 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/MasterData/data/master/store 2024-11-22T18:52:03,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35599 is added to blk_1073741829_1005 (size=34) 2024-11-22T18:52:03,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45781 is added to blk_1073741829_1005 (size=34) 2024-11-22T18:52:03,999 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T18:52:04,000 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T18:52:04,000 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T18:52:04,000 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T18:52:04,000 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T18:52:04,000 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T18:52:04,000 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-22T18:52:04,000 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732301524000Disabling compacts and flushes for region at 1732301524000Disabling writes for close at 1732301524000Writing region close event to WAL at 1732301524000Closed at 1732301524000 2024-11-22T18:52:04,001 WARN [master/d79ba0c344fb:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/MasterData/data/master/store/.initializing 2024-11-22T18:52:04,001 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/MasterData/WALs/d79ba0c344fb,32959,1732301523867 2024-11-22T18:52:04,004 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d79ba0c344fb%2C32959%2C1732301523867, suffix=, logDir=hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/MasterData/WALs/d79ba0c344fb,32959,1732301523867, archiveDir=hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/MasterData/oldWALs, maxLogs=10 2024-11-22T18:52:04,004 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor d79ba0c344fb%2C32959%2C1732301523867.1732301524004 2024-11-22T18:52:04,009 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/MasterData/WALs/d79ba0c344fb,32959,1732301523867/d79ba0c344fb%2C32959%2C1732301523867.1732301524004 2024-11-22T18:52:04,013 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40963:40963),(127.0.0.1/127.0.0.1:40621:40621)] 2024-11-22T18:52:04,013 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-22T18:52:04,013 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T18:52:04,013 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:52:04,013 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:52:04,015 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:52:04,016 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-22T18:52:04,016 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:52:04,017 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:52:04,017 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:52:04,018 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-22T18:52:04,018 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:52:04,018 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T18:52:04,018 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:52:04,019 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-22T18:52:04,019 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:52:04,020 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T18:52:04,020 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:52:04,021 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-22T18:52:04,021 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:52:04,022 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T18:52:04,022 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:52:04,022 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:52:04,023 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:52:04,024 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:52:04,024 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:52:04,024 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-22T18:52:04,026 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:52:04,029 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T18:52:04,029 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=724018, jitterRate=-0.07936452329158783}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-22T18:52:04,030 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732301524014Initializing all the Stores at 1732301524014Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732301524014Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732301524015 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732301524015Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732301524015Cleaning up temporary data from old regions at 1732301524024 (+9 ms)Region opened successfully at 1732301524030 (+6 ms) 2024-11-22T18:52:04,030 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-22T18:52:04,034 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60f313fd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d79ba0c344fb/172.17.0.2:0 2024-11-22T18:52:04,035 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-22T18:52:04,035 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-22T18:52:04,035 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-22T18:52:04,035 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-22T18:52:04,035 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-22T18:52:04,036 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-22T18:52:04,036 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-22T18:52:04,038 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-22T18:52:04,039 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32959-0x1014105761c0000, quorum=127.0.0.1:51025, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-22T18:52:04,041 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-22T18:52:04,041 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-22T18:52:04,042 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32959-0x1014105761c0000, quorum=127.0.0.1:51025, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-22T18:52:04,045 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-22T18:52:04,045 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-22T18:52:04,046 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32959-0x1014105761c0000, quorum=127.0.0.1:51025, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-22T18:52:04,048 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-22T18:52:04,049 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32959-0x1014105761c0000, quorum=127.0.0.1:51025, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-22T18:52:04,050 DEBUG 
[master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-22T18:52:04,052 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32959-0x1014105761c0000, quorum=127.0.0.1:51025, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-22T18:52:04,053 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-22T18:52:04,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32959-0x1014105761c0000, quorum=127.0.0.1:51025, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T18:52:04,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38379-0x1014105761c0001, quorum=127.0.0.1:51025, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T18:52:04,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32959-0x1014105761c0000, quorum=127.0.0.1:51025, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:52:04,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38379-0x1014105761c0001, quorum=127.0.0.1:51025, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:52:04,057 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=d79ba0c344fb,32959,1732301523867, sessionid=0x1014105761c0000, setting cluster-up flag (Was=false) 2024-11-22T18:52:04,060 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32959-0x1014105761c0000, quorum=127.0.0.1:51025, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:52:04,060 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38379-0x1014105761c0001, quorum=127.0.0.1:51025, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:52:04,070 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-22T18:52:04,070 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=d79ba0c344fb,32959,1732301523867 2024-11-22T18:52:04,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32959-0x1014105761c0000, quorum=127.0.0.1:51025, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:52:04,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38379-0x1014105761c0001, quorum=127.0.0.1:51025, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:52:04,081 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-22T18:52:04,082 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=d79ba0c344fb,32959,1732301523867 2024-11-22T18:52:04,083 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-22T18:52:04,085 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-22T18:52:04,085 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-22T18:52:04,085 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-22T18:52:04,085 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: d79ba0c344fb,32959,1732301523867 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-22T18:52:04,087 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/d79ba0c344fb:0, corePoolSize=5, maxPoolSize=5 2024-11-22T18:52:04,087 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/d79ba0c344fb:0, corePoolSize=5, maxPoolSize=5 2024-11-22T18:52:04,087 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/d79ba0c344fb:0, corePoolSize=5, maxPoolSize=5 2024-11-22T18:52:04,087 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/d79ba0c344fb:0, corePoolSize=5, maxPoolSize=5 2024-11-22T18:52:04,087 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/d79ba0c344fb:0, corePoolSize=10, maxPoolSize=10 2024-11-22T18:52:04,087 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:52:04,087 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/d79ba0c344fb:0, corePoolSize=2, maxPoolSize=2 2024-11-22T18:52:04,087 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/d79ba0c344fb:0, corePoolSize=1, 
maxPoolSize=1 2024-11-22T18:52:04,088 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732301554088 2024-11-22T18:52:04,088 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-22T18:52:04,088 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-22T18:52:04,088 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-22T18:52:04,088 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-22T18:52:04,088 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-22T18:52:04,088 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-22T18:52:04,089 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T18:52:04,089 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-22T18:52:04,090 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:52:04,090 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-22T18:52:04,092 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T18:52:04,093 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-22T18:52:04,093 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-22T18:52:04,093 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-22T18:52:04,093 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-22T18:52:04,093 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-22T18:52:04,093 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/d79ba0c344fb:0:becomeActiveMaster-HFileCleaner.large.0-1732301524093,5,FailOnTimeoutGroup] 2024-11-22T18:52:04,095 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/d79ba0c344fb:0:becomeActiveMaster-HFileCleaner.small.0-1732301524094,5,FailOnTimeoutGroup] 2024-11-22T18:52:04,095 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T18:52:04,095 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-22T18:52:04,095 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-22T18:52:04,095 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-22T18:52:04,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35599 is added to blk_1073741831_1007 (size=1321) 2024-11-22T18:52:04,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45781 is added to blk_1073741831_1007 (size=1321) 2024-11-22T18:52:04,108 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-22T18:52:04,108 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99 2024-11-22T18:52:04,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35599 is added to blk_1073741832_1008 (size=32) 2024-11-22T18:52:04,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45781 is added to blk_1073741832_1008 (size=32) 2024-11-22T18:52:04,119 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T18:52:04,120 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T18:52:04,121 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T18:52:04,121 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:52:04,122 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:52:04,122 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T18:52:04,123 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T18:52:04,123 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:52:04,124 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:52:04,124 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T18:52:04,125 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T18:52:04,125 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:52:04,125 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:52:04,125 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T18:52:04,126 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T18:52:04,126 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:52:04,127 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:52:04,127 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T18:52:04,128 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/data/hbase/meta/1588230740 2024-11-22T18:52:04,128 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/data/hbase/meta/1588230740 2024-11-22T18:52:04,129 INFO [RS:0;d79ba0c344fb:38379 {}] regionserver.HRegionServer(746): ClusterId : 431788dc-5038-4a39-a809-bc639c875854 2024-11-22T18:52:04,129 DEBUG [RS:0;d79ba0c344fb:38379 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-22T18:52:04,130 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T18:52:04,130 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T18:52:04,130 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-22T18:52:04,132 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T18:52:04,132 DEBUG [RS:0;d79ba0c344fb:38379 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-22T18:52:04,132 DEBUG [RS:0;d79ba0c344fb:38379 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-22T18:52:04,134 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T18:52:04,134 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=840925, jitterRate=0.06929256021976471}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T18:52:04,135 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732301524119Initializing all the Stores at 1732301524120 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732301524120Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732301524120Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732301524120Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732301524120Cleaning up temporary data from old regions at 1732301524130 (+10 ms)Region opened successfully at 1732301524135 (+5 ms) 2024-11-22T18:52:04,135 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T18:52:04,135 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T18:52:04,135 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T18:52:04,135 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T18:52:04,135 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T18:52:04,136 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed 
hbase:meta,,1.1588230740 2024-11-22T18:52:04,136 DEBUG [RS:0;d79ba0c344fb:38379 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-22T18:52:04,136 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732301524135Disabling compacts and flushes for region at 1732301524135Disabling writes for close at 1732301524135Writing region close event to WAL at 1732301524136 (+1 ms)Closed at 1732301524136 2024-11-22T18:52:04,136 DEBUG [RS:0;d79ba0c344fb:38379 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@21ad8a89, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d79ba0c344fb/172.17.0.2:0 2024-11-22T18:52:04,137 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T18:52:04,137 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-22T18:52:04,137 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-22T18:52:04,139 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T18:52:04,140 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-22T18:52:04,154 DEBUG [RS:0;d79ba0c344fb:38379 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;d79ba0c344fb:38379 2024-11-22T18:52:04,154 INFO [RS:0;d79ba0c344fb:38379 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-22T18:52:04,154 INFO [RS:0;d79ba0c344fb:38379 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-22T18:52:04,154 DEBUG [RS:0;d79ba0c344fb:38379 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-22T18:52:04,155 INFO [RS:0;d79ba0c344fb:38379 {}] regionserver.HRegionServer(2659): reportForDuty to master=d79ba0c344fb,32959,1732301523867 with port=38379, startcode=1732301523915 2024-11-22T18:52:04,155 DEBUG [RS:0;d79ba0c344fb:38379 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-22T18:52:04,157 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38447, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-22T18:52:04,158 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32959 {}] master.ServerManager(363): Checking decommissioned status of RegionServer d79ba0c344fb,38379,1732301523915 2024-11-22T18:52:04,158 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32959 {}] master.ServerManager(517): Registering regionserver=d79ba0c344fb,38379,1732301523915 2024-11-22T18:52:04,159 DEBUG [RS:0;d79ba0c344fb:38379 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99 2024-11-22T18:52:04,159 DEBUG [RS:0;d79ba0c344fb:38379 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46651 2024-11-22T18:52:04,159 DEBUG [RS:0;d79ba0c344fb:38379 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-22T18:52:04,161 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32959-0x1014105761c0000, quorum=127.0.0.1:51025, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T18:52:04,162 DEBUG [RS:0;d79ba0c344fb:38379 {}] zookeeper.ZKUtil(111): regionserver:38379-0x1014105761c0001, quorum=127.0.0.1:51025, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/d79ba0c344fb,38379,1732301523915 2024-11-22T18:52:04,162 WARN [RS:0;d79ba0c344fb:38379 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-22T18:52:04,162 INFO [RS:0;d79ba0c344fb:38379 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T18:52:04,162 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [d79ba0c344fb,38379,1732301523915] 2024-11-22T18:52:04,162 DEBUG [RS:0;d79ba0c344fb:38379 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915 2024-11-22T18:52:04,166 INFO [RS:0;d79ba0c344fb:38379 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-22T18:52:04,168 INFO [RS:0;d79ba0c344fb:38379 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-22T18:52:04,168 INFO [RS:0;d79ba0c344fb:38379 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-22T18:52:04,168 INFO [RS:0;d79ba0c344fb:38379 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-22T18:52:04,171 INFO [RS:0;d79ba0c344fb:38379 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-22T18:52:04,172 INFO [RS:0;d79ba0c344fb:38379 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-22T18:52:04,172 INFO [RS:0;d79ba0c344fb:38379 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-22T18:52:04,172 DEBUG [RS:0;d79ba0c344fb:38379 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:52:04,172 DEBUG [RS:0;d79ba0c344fb:38379 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:52:04,172 DEBUG [RS:0;d79ba0c344fb:38379 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:52:04,172 DEBUG [RS:0;d79ba0c344fb:38379 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:52:04,172 DEBUG [RS:0;d79ba0c344fb:38379 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:52:04,172 DEBUG [RS:0;d79ba0c344fb:38379 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/d79ba0c344fb:0, corePoolSize=2, maxPoolSize=2 2024-11-22T18:52:04,172 DEBUG [RS:0;d79ba0c344fb:38379 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:52:04,172 DEBUG [RS:0;d79ba0c344fb:38379 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:52:04,172 DEBUG [RS:0;d79ba0c344fb:38379 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:52:04,172 DEBUG [RS:0;d79ba0c344fb:38379 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:52:04,172 DEBUG [RS:0;d79ba0c344fb:38379 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:52:04,172 DEBUG [RS:0;d79ba0c344fb:38379 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:52:04,172 DEBUG [RS:0;d79ba0c344fb:38379 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/d79ba0c344fb:0, corePoolSize=3, maxPoolSize=3 2024-11-22T18:52:04,172 DEBUG [RS:0;d79ba0c344fb:38379 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/d79ba0c344fb:0, corePoolSize=3, maxPoolSize=3 2024-11-22T18:52:04,176 INFO [RS:0;d79ba0c344fb:38379 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-22T18:52:04,176 INFO [RS:0;d79ba0c344fb:38379 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T18:52:04,176 INFO [RS:0;d79ba0c344fb:38379 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T18:52:04,176 INFO [RS:0;d79ba0c344fb:38379 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-22T18:52:04,176 INFO [RS:0;d79ba0c344fb:38379 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-22T18:52:04,176 INFO [RS:0;d79ba0c344fb:38379 {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,38379,1732301523915-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T18:52:04,191 INFO [RS:0;d79ba0c344fb:38379 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-22T18:52:04,192 INFO [RS:0;d79ba0c344fb:38379 {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,38379,1732301523915-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T18:52:04,192 INFO [RS:0;d79ba0c344fb:38379 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T18:52:04,192 INFO [RS:0;d79ba0c344fb:38379 {}] regionserver.Replication(171): d79ba0c344fb,38379,1732301523915 started 2024-11-22T18:52:04,207 INFO [RS:0;d79ba0c344fb:38379 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T18:52:04,207 INFO [RS:0;d79ba0c344fb:38379 {}] regionserver.HRegionServer(1482): Serving as d79ba0c344fb,38379,1732301523915, RpcServer on d79ba0c344fb/172.17.0.2:38379, sessionid=0x1014105761c0001 2024-11-22T18:52:04,207 DEBUG [RS:0;d79ba0c344fb:38379 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-22T18:52:04,207 DEBUG [RS:0;d79ba0c344fb:38379 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager d79ba0c344fb,38379,1732301523915 2024-11-22T18:52:04,207 DEBUG [RS:0;d79ba0c344fb:38379 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd79ba0c344fb,38379,1732301523915' 2024-11-22T18:52:04,207 DEBUG [RS:0;d79ba0c344fb:38379 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-22T18:52:04,208 DEBUG [RS:0;d79ba0c344fb:38379 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-22T18:52:04,208 DEBUG [RS:0;d79ba0c344fb:38379 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-22T18:52:04,208 DEBUG [RS:0;d79ba0c344fb:38379 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-22T18:52:04,208 DEBUG [RS:0;d79ba0c344fb:38379 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager d79ba0c344fb,38379,1732301523915 2024-11-22T18:52:04,208 DEBUG [RS:0;d79ba0c344fb:38379 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd79ba0c344fb,38379,1732301523915' 2024-11-22T18:52:04,208 DEBUG [RS:0;d79ba0c344fb:38379 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-22T18:52:04,209 DEBUG 
[RS:0;d79ba0c344fb:38379 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-22T18:52:04,209 DEBUG [RS:0;d79ba0c344fb:38379 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-22T18:52:04,209 INFO [RS:0;d79ba0c344fb:38379 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-22T18:52:04,209 INFO [RS:0;d79ba0c344fb:38379 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-22T18:52:04,290 WARN [d79ba0c344fb:32959 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-22T18:52:04,311 INFO [RS:0;d79ba0c344fb:38379 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d79ba0c344fb%2C38379%2C1732301523915, suffix=, logDir=hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915, archiveDir=hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/oldWALs, maxLogs=32 2024-11-22T18:52:04,312 INFO [RS:0;d79ba0c344fb:38379 {}] monitor.StreamSlowMonitor(122): New stream slow monitor d79ba0c344fb%2C38379%2C1732301523915.1732301524312 2024-11-22T18:52:04,318 INFO [RS:0;d79ba0c344fb:38379 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301524312 2024-11-22T18:52:04,319 DEBUG [RS:0;d79ba0c344fb:38379 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40963:40963),(127.0.0.1/127.0.0.1:40621:40621)] 2024-11-22T18:52:04,352 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:04,358 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T18:52:04,540 DEBUG [d79ba0c344fb:32959 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-22T18:52:04,541 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=d79ba0c344fb,38379,1732301523915 2024-11-22T18:52:04,542 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as d79ba0c344fb,38379,1732301523915, state=OPENING 2024-11-22T18:52:04,544 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-22T18:52:04,545 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32959-0x1014105761c0000, quorum=127.0.0.1:51025, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:52:04,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38379-0x1014105761c0001, quorum=127.0.0.1:51025, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:52:04,546 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T18:52:04,546 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T18:52:04,546 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T18:52:04,546 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=d79ba0c344fb,38379,1732301523915}] 2024-11-22T18:52:04,699 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-22T18:52:04,701 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45793, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-22T18:52:04,705 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-22T18:52:04,705 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T18:52:04,707 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d79ba0c344fb%2C38379%2C1732301523915.meta, suffix=.meta, logDir=hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915, archiveDir=hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/oldWALs, maxLogs=32 2024-11-22T18:52:04,708 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor d79ba0c344fb%2C38379%2C1732301523915.meta.1732301524708.meta 2024-11-22T18:52:04,713 INFO 
[RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.meta.1732301524708.meta 2024-11-22T18:52:04,717 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40621:40621),(127.0.0.1/127.0.0.1:40963:40963)] 2024-11-22T18:52:04,720 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-22T18:52:04,721 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-22T18:52:04,721 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-22T18:52:04,721 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-22T18:52:04,721 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-22T18:52:04,721 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T18:52:04,721 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-22T18:52:04,721 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-22T18:52:04,723 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T18:52:04,724 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T18:52:04,724 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:52:04,724 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:52:04,724 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T18:52:04,725 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T18:52:04,725 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:52:04,726 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:52:04,726 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T18:52:04,727 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T18:52:04,727 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:52:04,727 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:52:04,727 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T18:52:04,728 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T18:52:04,728 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:52:04,728 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:52:04,728 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T18:52:04,729 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/data/hbase/meta/1588230740 2024-11-22T18:52:04,730 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/data/hbase/meta/1588230740 2024-11-22T18:52:04,731 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T18:52:04,731 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T18:52:04,732 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
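Because hbase.hregion.percolumnfamilyflush.size.lower.bound is not set on the hbase:meta descriptor, FlushLargeStoresPolicy falls back to dividing the region's memstore flush heap size by the number of column families; with the four meta families seen above (info, ns, rep_barrier, table) that yields the 16.0 M figure in the entry just logged, and matches flushSizeLowerBound=16777216 reported a few entries later. A minimal sketch of that fallback arithmetic, with illustrative names rather than the policy's internal fields:

    // Illustrative arithmetic only; names are made up, not FlushLargeStoresPolicy internals.
    public class FlushLowerBoundSketch {
      static long perFamilyFlushLowerBound(long memStoreFlushHeapSize, int numFamilies) {
        // Fallback used when hbase.hregion.percolumnfamilyflush.size.lower.bound is unset:
        // the region's flush heap size divided evenly across its column families.
        return memStoreFlushHeapSize / numFamilies;
      }

      public static void main(String[] args) {
        long flushHeapSize = 64L * 1024 * 1024; // implied by the 16.0 M figure logged for hbase:meta
        int families = 4;                       // info, ns, rep_barrier, table
        // Prints 16777216, matching FlushLargeStoresPolicy{flushSizeLowerBound=16777216} below.
        System.out.println(perFamilyFlushLowerBound(flushHeapSize, families));
      }
    }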
2024-11-22T18:52:04,733 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T18:52:04,734 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=791583, jitterRate=0.006550520658493042}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T18:52:04,734 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-22T18:52:04,735 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732301524721Writing region info on filesystem at 1732301524721Initializing all the Stores at 1732301524722 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732301524722Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732301524723 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732301524723Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732301524723Cleaning up temporary data from old regions at 1732301524731 (+8 ms)Running coprocessor post-open hooks at 1732301524734 (+3 ms)Region opened successfully at 1732301524735 (+1 ms) 2024-11-22T18:52:04,736 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732301524699 2024-11-22T18:52:04,738 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-22T18:52:04,738 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-22T18:52:04,739 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=d79ba0c344fb,38379,1732301523915 2024-11-22T18:52:04,740 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as d79ba0c344fb,38379,1732301523915, state=OPEN 2024-11-22T18:52:04,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32959-0x1014105761c0000, quorum=127.0.0.1:51025, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T18:52:04,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38379-0x1014105761c0001, quorum=127.0.0.1:51025, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T18:52:04,746 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=d79ba0c344fb,38379,1732301523915 2024-11-22T18:52:04,747 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T18:52:04,747 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T18:52:04,750 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-22T18:52:04,750 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=d79ba0c344fb,38379,1732301523915 in 200 msec 2024-11-22T18:52:04,752 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-22T18:52:04,752 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 613 msec 2024-11-22T18:52:04,753 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T18:52:04,753 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-22T18:52:04,754 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T18:52:04,755 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=d79ba0c344fb,38379,1732301523915, seqNum=-1] 2024-11-22T18:52:04,755 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T18:52:04,756 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52257, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T18:52:04,761 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 675 msec 2024-11-22T18:52:04,761 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732301524761, completionTime=-1 2024-11-22T18:52:04,762 INFO 
[master/d79ba0c344fb:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running
2024-11-22T18:52:04,762 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster...
2024-11-22T18:52:04,763 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1
2024-11-22T18:52:04,763 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732301584763
2024-11-22T18:52:04,763 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732301644763
2024-11-22T18:52:04,763 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec
2024-11-22T18:52:04,764 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,32959,1732301523867-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-22T18:52:04,764 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,32959,1732301523867-BalancerChore, period=300000, unit=MILLISECONDS is enabled.
2024-11-22T18:52:04,764 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,32959,1732301523867-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled.
2024-11-22T18:52:04,764 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-d79ba0c344fb:32959, period=300000, unit=MILLISECONDS is enabled.
2024-11-22T18:52:04,764 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled.
2024-11-22T18:52:04,764 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled.
2024-11-22T18:52:04,766 DEBUG [master/d79ba0c344fb:0.Chore.1 {}] janitor.CatalogJanitor(180):
2024-11-22T18:52:04,768 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.821sec
2024-11-22T18:52:04,768 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled
2024-11-22T18:52:04,768 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting.
2024-11-22T18:52:04,768 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting.
2024-11-22T18:52:04,768 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting.
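Both the region server earlier (MemstoreFlusherChore, nonceCleaner, BrokenStoreFileCleaner, ...) and the master here register their periodic work as chores, and the "Chore ScheduledChore name=... is enabled." line from ChoreService(168) appears when a chore is scheduled. A minimal sketch of that pattern, assuming the public ScheduledChore/ChoreService API; the chore name, period, work body, and the throwaway Stoppable below are illustrative, not taken from HBase internals (real callers typically pass the server itself as the Stoppable).

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) throws InterruptedException {
        // Trivial Stoppable so the chore can be cancelled.
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };

        ChoreService choreService = new ChoreService("sketch");
        ScheduledChore statusChore = new ScheduledChore("ExampleStatusChore", stopper, 1000) {
          @Override protected void chore() {
            // periodic work goes here; the period above is interpreted in milliseconds by default
          }
        };
        // Scheduling is what produces the "Chore ScheduledChore name=... is enabled." log lines.
        choreService.scheduleChore(statusChore);

        Thread.sleep(3000);
        stopper.stop("done");
        choreService.shutdown();
      }
    }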
2024-11-22T18:52:04,768 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-22T18:52:04,768 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,32959,1732301523867-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T18:52:04,768 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,32959,1732301523867-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-22T18:52:04,771 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-22T18:52:04,771 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-22T18:52:04,771 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,32959,1732301523867-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T18:52:04,829 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62cd82cb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T18:52:04,829 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request d79ba0c344fb,32959,-1 for getting cluster id 2024-11-22T18:52:04,830 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T18:52:04,831 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '431788dc-5038-4a39-a809-bc639c875854' 2024-11-22T18:52:04,832 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T18:52:04,832 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "431788dc-5038-4a39-a809-bc639c875854" 2024-11-22T18:52:04,832 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@30c9d755, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T18:52:04,832 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [d79ba0c344fb,32959,-1] 2024-11-22T18:52:04,832 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T18:52:04,833 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T18:52:04,834 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40682, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T18:52:04,834 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@277eda4d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-22T18:52:04,835 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry
2024-11-22T18:52:04,836 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=d79ba0c344fb,38379,1732301523915, seqNum=-1]
2024-11-22T18:52:04,836 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-11-22T18:52:04,837 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59880, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-11-22T18:52:04,839 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=d79ba0c344fb,32959,1732301523867
2024-11-22T18:52:04,839 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-22T18:52:04,842 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false
2024-11-22T18:52:04,842 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart
2024-11-22T18:52:04,842 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2
2024-11-22T18:52:04,842 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry
2024-11-22T18:52:04,843 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is d79ba0c344fb,32959,1732301523867
2024-11-22T18:52:04,843 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@7b4b96b6
2024-11-22T18:52:04,843 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false
2024-11-22T18:52:04,845 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40696, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService
2024-11-22T18:52:04,845 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32959 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions.
2024-11-22T18:52:04,845 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32959 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing.
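The two TableDescriptorChecker warnings directly above reflect the deliberately tiny region settings the log-rolling test runs with: a maximum region file size of 786432 bytes and a memstore flush size of 8192 bytes, chosen so that flushes and rolls happen quickly. A minimal sketch of how such values could be supplied through the configuration; it is illustrative only, and the test itself may set them elsewhere (for example on the table descriptor or in its own setup helper).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SmallRegionConfSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Values taken from the warnings above; deliberately tiny for the test.
        conf.setLong("hbase.hregion.max.filesize", 786432);      // ~768 KB instead of many GB
        conf.setLong("hbase.hregion.memstore.flush.size", 8192); // 8 KB instead of the usual 128 MB
        // A cluster started with this conf would trip the TableDescriptorChecker warnings
        // seen above whenever a table is created with these effective values.
        System.out.println(conf.get("hbase.hregion.max.filesize"));
      }
    }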
2024-11-22T18:52:04,846 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32959 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T18:52:04,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32959 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-22T18:52:04,848 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-22T18:52:04,848 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:52:04,848 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32959 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-22T18:52:04,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32959 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-22T18:52:04,849 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-22T18:52:04,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35599 is added to blk_1073741835_1011 (size=395) 2024-11-22T18:52:04,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45781 is added to blk_1073741835_1011 (size=395) 2024-11-22T18:52:04,858 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 87d631ef66d1eba8cb24b6bf7126a2f0, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1732301524845.87d631ef66d1eba8cb24b6bf7126a2f0.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99 2024-11-22T18:52:04,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45781 is added to blk_1073741836_1012 (size=78) 2024-11-22T18:52:04,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35599 is added to blk_1073741836_1012 (size=78) 2024-11-22T18:52:04,865 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1732301524845.87d631ef66d1eba8cb24b6bf7126a2f0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-22T18:52:04,865 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing 87d631ef66d1eba8cb24b6bf7126a2f0, disabling compactions & flushes
2024-11-22T18:52:04,865 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1732301524845.87d631ef66d1eba8cb24b6bf7126a2f0.
2024-11-22T18:52:04,865 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732301524845.87d631ef66d1eba8cb24b6bf7126a2f0.
2024-11-22T18:52:04,865 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732301524845.87d631ef66d1eba8cb24b6bf7126a2f0. after waiting 0 ms
2024-11-22T18:52:04,865 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1732301524845.87d631ef66d1eba8cb24b6bf7126a2f0.
2024-11-22T18:52:04,865 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732301524845.87d631ef66d1eba8cb24b6bf7126a2f0.
2024-11-22T18:52:04,865 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for 87d631ef66d1eba8cb24b6bf7126a2f0: Waiting for close lock at 1732301524865Disabling compacts and flushes for region at 1732301524865Disabling writes for close at 1732301524865Writing region close event to WAL at 1732301524865Closed at 1732301524865
2024-11-22T18:52:04,867 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META
2024-11-22T18:52:04,867 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1732301524845.87d631ef66d1eba8cb24b6bf7126a2f0.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1732301524867"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732301524867"}]},"ts":"1732301524867"}
2024-11-22T18:52:04,870 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta.
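The descriptor logged with the create request above (table TestLogRolling-testLogRollOnPipelineRestart with a single 'info' family, VERSIONS => '1', BLOOMFILTER => 'ROW', BLOCKSIZE => 65536) is what a client builds before calling Admin.createTable, which in turn drives the CreateTableProcedure states (pid=4) in these entries. A sketch against the public descriptor builder API; the connection handling is illustrative and the test's own helper may differ.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName name = TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart");
          TableDescriptor desc = TableDescriptorBuilder.newBuilder(name)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                  .setMaxVersions(1)                 // VERSIONS => '1'
                  .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
                  .setBlocksize(65536)               // BLOCKSIZE => '65536 B (64KB)'
                  .build())
              .build();
          // Triggers a CreateTableProcedure like pid=4 whose states are logged above.
          admin.createTable(desc);
        }
      }
    }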
2024-11-22T18:52:04,871 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-22T18:52:04,871 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732301524871"}]},"ts":"1732301524871"} 2024-11-22T18:52:04,873 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-22T18:52:04,874 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=87d631ef66d1eba8cb24b6bf7126a2f0, ASSIGN}] 2024-11-22T18:52:04,875 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=87d631ef66d1eba8cb24b6bf7126a2f0, ASSIGN 2024-11-22T18:52:04,876 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=87d631ef66d1eba8cb24b6bf7126a2f0, ASSIGN; state=OFFLINE, location=d79ba0c344fb,38379,1732301523915; forceNewPlan=false, retain=false 2024-11-22T18:52:05,027 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=87d631ef66d1eba8cb24b6bf7126a2f0, regionState=OPENING, regionLocation=d79ba0c344fb,38379,1732301523915 2024-11-22T18:52:05,030 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=87d631ef66d1eba8cb24b6bf7126a2f0, ASSIGN because future has completed 2024-11-22T18:52:05,031 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 87d631ef66d1eba8cb24b6bf7126a2f0, server=d79ba0c344fb,38379,1732301523915}] 2024-11-22T18:52:05,187 INFO [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1732301524845.87d631ef66d1eba8cb24b6bf7126a2f0. 
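The entries above assign the new region 87d631ef66d1eba8cb24b6bf7126a2f0 to d79ba0c344fb,38379,1732301523915 and dispatch an OpenRegionProcedure (pid=6) to it; the open itself is journaled in the entries that follow. Once the region reports OPEN, a client can resolve its location through hbase:meta. A small sketch using the public RegionLocator API; the connection setup is illustrative only.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class RegionLocationSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             RegionLocator locator =
                 conn.getRegionLocator(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))) {
          // After the OpenRegionProcedure above completes, the single region of this table
          // resolves to a location like d79ba0c344fb,38379,1732301523915.
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }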
2024-11-22T18:52:05,188 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 87d631ef66d1eba8cb24b6bf7126a2f0, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1732301524845.87d631ef66d1eba8cb24b6bf7126a2f0.', STARTKEY => '', ENDKEY => ''} 2024-11-22T18:52:05,188 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 87d631ef66d1eba8cb24b6bf7126a2f0 2024-11-22T18:52:05,188 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1732301524845.87d631ef66d1eba8cb24b6bf7126a2f0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T18:52:05,188 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 87d631ef66d1eba8cb24b6bf7126a2f0 2024-11-22T18:52:05,188 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 87d631ef66d1eba8cb24b6bf7126a2f0 2024-11-22T18:52:05,190 INFO [StoreOpener-87d631ef66d1eba8cb24b6bf7126a2f0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 87d631ef66d1eba8cb24b6bf7126a2f0 2024-11-22T18:52:05,191 INFO [StoreOpener-87d631ef66d1eba8cb24b6bf7126a2f0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 87d631ef66d1eba8cb24b6bf7126a2f0 columnFamilyName info 2024-11-22T18:52:05,191 DEBUG [StoreOpener-87d631ef66d1eba8cb24b6bf7126a2f0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:52:05,192 INFO [StoreOpener-87d631ef66d1eba8cb24b6bf7126a2f0-1 {}] regionserver.HStore(327): Store=87d631ef66d1eba8cb24b6bf7126a2f0/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T18:52:05,192 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 87d631ef66d1eba8cb24b6bf7126a2f0 2024-11-22T18:52:05,193 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/data/default/TestLogRolling-testLogRollOnPipelineRestart/87d631ef66d1eba8cb24b6bf7126a2f0 2024-11-22T18:52:05,193 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/data/default/TestLogRolling-testLogRollOnPipelineRestart/87d631ef66d1eba8cb24b6bf7126a2f0 2024-11-22T18:52:05,193 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 87d631ef66d1eba8cb24b6bf7126a2f0 2024-11-22T18:52:05,193 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 87d631ef66d1eba8cb24b6bf7126a2f0 2024-11-22T18:52:05,195 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 87d631ef66d1eba8cb24b6bf7126a2f0 2024-11-22T18:52:05,197 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/data/default/TestLogRolling-testLogRollOnPipelineRestart/87d631ef66d1eba8cb24b6bf7126a2f0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T18:52:05,198 INFO [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 87d631ef66d1eba8cb24b6bf7126a2f0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=848523, jitterRate=0.0789531022310257}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T18:52:05,198 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 87d631ef66d1eba8cb24b6bf7126a2f0 2024-11-22T18:52:05,199 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 87d631ef66d1eba8cb24b6bf7126a2f0: Running coprocessor pre-open hook at 1732301525188Writing region info on filesystem at 1732301525188Initializing all the Stores at 1732301525189 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732301525189Cleaning up temporary data from old regions at 1732301525193 (+4 ms)Running coprocessor post-open hooks at 1732301525198 (+5 ms)Region opened successfully at 1732301525199 (+1 ms) 2024-11-22T18:52:05,200 INFO [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1732301524845.87d631ef66d1eba8cb24b6bf7126a2f0., pid=6, masterSystemTime=1732301525183 2024-11-22T18:52:05,202 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnPipelineRestart,,1732301524845.87d631ef66d1eba8cb24b6bf7126a2f0. 2024-11-22T18:52:05,203 INFO [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1732301524845.87d631ef66d1eba8cb24b6bf7126a2f0. 2024-11-22T18:52:05,203 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=87d631ef66d1eba8cb24b6bf7126a2f0, regionState=OPEN, openSeqNum=2, regionLocation=d79ba0c344fb,38379,1732301523915 2024-11-22T18:52:05,206 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 87d631ef66d1eba8cb24b6bf7126a2f0, server=d79ba0c344fb,38379,1732301523915 because future has completed 2024-11-22T18:52:05,210 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-22T18:52:05,210 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 87d631ef66d1eba8cb24b6bf7126a2f0, server=d79ba0c344fb,38379,1732301523915 in 177 msec 2024-11-22T18:52:05,213 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-22T18:52:05,213 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=87d631ef66d1eba8cb24b6bf7126a2f0, ASSIGN in 337 msec 2024-11-22T18:52:05,214 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-22T18:52:05,214 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732301525214"}]},"ts":"1732301525214"} 2024-11-22T18:52:05,216 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-22T18:52:05,217 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-22T18:52:05,219 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 371 msec 2024-11-22T18:52:05,353 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:05,358 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:06,353 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:06,359 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:07,354 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:07,359 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:08,355 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:08,360 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:09,355 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:09,360 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:10,224 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-22T18:52:10,239 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:10,239 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:10,240 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:10,240 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:10,240 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:10,241 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:10,244 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:10,244 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:10,244 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:10,246 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:10,250 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-22T18:52:10,250 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-22T18:52:10,250 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-22T18:52:10,250 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-22T18:52:10,251 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T18:52:10,251 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-22T18:52:10,251 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-22T18:52:10,252 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-22T18:52:10,356 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:10,361 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T18:52:11,357 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:11,361 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:12,357 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T18:52:12,362 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:13,358 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:13,363 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T18:52:14,358 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:14,363 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:14,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32959 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-22T18:52:14,877 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-11-22T18:52:14,877 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-11-22T18:52:14,880 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-11-22T18:52:14,880 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1732301524845.87d631ef66d1eba8cb24b6bf7126a2f0. 2024-11-22T18:52:14,883 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1732301524845.87d631ef66d1eba8cb24b6bf7126a2f0., hostname=d79ba0c344fb,38379,1732301523915, seqNum=2] 2024-11-22T18:52:15,359 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:15,364 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:16,360 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:16,364 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:16,886 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301524312 2024-11-22T18:52:16,887 WARN [ResponseProcessor for block BP-1703702927-172.17.0.2-1732301523030:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1703702927-172.17.0.2-1732301523030:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:52:16,887 WARN [ResponseProcessor for block BP-1703702927-172.17.0.2-1732301523030:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1703702927-172.17.0.2-1732301523030:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T18:52:16,887 WARN [ResponseProcessor for block BP-1703702927-172.17.0.2-1732301523030:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1703702927-172.17.0.2-1732301523030:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-1703702927-172.17.0.2-1732301523030:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:35599,DS-a605f1e1-91c5-4bce-9b24-67adf67db55e,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:52:16,887 WARN [DataStreamer for file /user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/MasterData/WALs/d79ba0c344fb,32959,1732301523867/d79ba0c344fb%2C32959%2C1732301523867.1732301524004 block BP-1703702927-172.17.0.2-1732301523030:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1703702927-172.17.0.2-1732301523030:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35599,DS-a605f1e1-91c5-4bce-9b24-67adf67db55e,DISK], DatanodeInfoWithStorage[127.0.0.1:45781,DS-647e0fc4-b3cf-4111-b483-31aada013bb6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35599,DS-a605f1e1-91c5-4bce-9b24-67adf67db55e,DISK]) is bad. 2024-11-22T18:52:16,887 WARN [DataStreamer for file /user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301524312 block BP-1703702927-172.17.0.2-1732301523030:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1703702927-172.17.0.2-1732301523030:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35599,DS-a605f1e1-91c5-4bce-9b24-67adf67db55e,DISK], DatanodeInfoWithStorage[127.0.0.1:45781,DS-647e0fc4-b3cf-4111-b483-31aada013bb6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35599,DS-a605f1e1-91c5-4bce-9b24-67adf67db55e,DISK]) is bad. 2024-11-22T18:52:16,888 WARN [DataStreamer for file /user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.meta.1732301524708.meta block BP-1703702927-172.17.0.2-1732301523030:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1703702927-172.17.0.2-1732301523030:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45781,DS-647e0fc4-b3cf-4111-b483-31aada013bb6,DISK], DatanodeInfoWithStorage[127.0.0.1:35599,DS-a605f1e1-91c5-4bce-9b24-67adf67db55e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35599,DS-a605f1e1-91c5-4bce-9b24-67adf67db55e,DISK]) is bad. 2024-11-22T18:52:16,888 WARN [PacketResponder: BP-1703702927-172.17.0.2-1732301523030:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:35599] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:52:16,888 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-165601234_22 at /127.0.0.1:51406 [Receiving block BP-1703702927-172.17.0.2-1732301523030:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:45781:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51406 dst: /127.0.0.1:45781 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:52:16,888 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-165601234_22 at /127.0.0.1:53464 [Receiving block BP-1703702927-172.17.0.2-1732301523030:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:35599:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53464 dst: /127.0.0.1:35599 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:52:16,888 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1131236379_22 at /127.0.0.1:51438 [Receiving block BP-1703702927-172.17.0.2-1732301523030:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:45781:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51438 dst: /127.0.0.1:45781 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T18:52:16,888 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1131236379_22 at /127.0.0.1:53496 [Receiving block BP-1703702927-172.17.0.2-1732301523030:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:35599:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53496 dst: /127.0.0.1:35599 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:52:16,889 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1131236379_22 at /127.0.0.1:51436 [Receiving block BP-1703702927-172.17.0.2-1732301523030:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:45781:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51436 dst: /127.0.0.1:45781 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:52:16,889 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1131236379_22 at /127.0.0.1:53510 [Receiving block BP-1703702927-172.17.0.2-1732301523030:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:35599:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53510 dst: /127.0.0.1:35599 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T18:52:16,891 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@341f9f9e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T18:52:16,891 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7f78fbde{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T18:52:16,892 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T18:52:16,892 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7ec5b9af{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T18:52:16,892 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4bc294e4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/hadoop.log.dir/,STOPPED} 2024-11-22T18:52:16,894 WARN [BP-1703702927-172.17.0.2-1732301523030 heartbeating to localhost/127.0.0.1:46651 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T18:52:16,894 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-22T18:52:16,894 WARN [BP-1703702927-172.17.0.2-1732301523030 heartbeating to localhost/127.0.0.1:46651 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1703702927-172.17.0.2-1732301523030 (Datanode Uuid c0a494c7-167c-424b-9e54-5b31acf57666) service to localhost/127.0.0.1:46651 2024-11-22T18:52:16,894 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T18:52:16,894 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/cluster_6f545e2e-bed3-1cd6-45bc-f784bad987a4/data/data3/current/BP-1703702927-172.17.0.2-1732301523030 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T18:52:16,895 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/cluster_6f545e2e-bed3-1cd6-45bc-f784bad987a4/data/data4/current/BP-1703702927-172.17.0.2-1732301523030 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T18:52:16,895 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T18:52:16,905 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T18:52:16,908 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T18:52:16,909 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T18:52:16,909 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T18:52:16,909 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T18:52:16,909 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4136ef12{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/hadoop.log.dir/,AVAILABLE} 2024-11-22T18:52:16,910 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@36974255{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T18:52:17,024 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@12f241e2{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/java.io.tmpdir/jetty-localhost-46311-hadoop-hdfs-3_4_1-tests_jar-_-any-14598752054840261288/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T18:52:17,024 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2cce6536{HTTP/1.1, (http/1.1)}{localhost:46311} 2024-11-22T18:52:17,024 INFO [Time-limited test {}] server.Server(415): Started @162414ms 2024-11-22T18:52:17,026 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T18:52:17,045 WARN [ResponseProcessor for block BP-1703702927-172.17.0.2-1732301523030:blk_1073741834_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1703702927-172.17.0.2-1732301523030:blk_1073741834_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:52:17,045 WARN [ResponseProcessor for block BP-1703702927-172.17.0.2-1732301523030:blk_1073741830_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1703702927-172.17.0.2-1732301523030:blk_1073741830_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:52:17,045 WARN [ResponseProcessor for block BP-1703702927-172.17.0.2-1732301523030:blk_1073741833_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1703702927-172.17.0.2-1732301523030:blk_1073741833_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:52:17,045 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-165601234_22 at /127.0.0.1:45464 [Receiving block BP-1703702927-172.17.0.2-1732301523030:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:45781:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45464 dst: /127.0.0.1:45781 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T18:52:17,046 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1131236379_22 at /127.0.0.1:45442 [Receiving block BP-1703702927-172.17.0.2-1732301523030:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:45781:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45442 dst: /127.0.0.1:45781 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:52:17,046 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1131236379_22 at /127.0.0.1:45444 [Receiving block BP-1703702927-172.17.0.2-1732301523030:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:45781:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45444 dst: /127.0.0.1:45781 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:52:17,052 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@371f8296{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T18:52:17,053 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5e97eedf{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T18:52:17,053 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T18:52:17,053 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2b6caabd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T18:52:17,053 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3a93e8eb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/hadoop.log.dir/,STOPPED} 2024-11-22T18:52:17,054 WARN [BP-1703702927-172.17.0.2-1732301523030 heartbeating to localhost/127.0.0.1:46651 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T18:52:17,054 WARN [BP-1703702927-172.17.0.2-1732301523030 heartbeating to localhost/127.0.0.1:46651 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1703702927-172.17.0.2-1732301523030 (Datanode Uuid 6573a035-60e5-4acc-9a47-16eae730f01f) service to localhost/127.0.0.1:46651 2024-11-22T18:52:17,054 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
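[Editor's note] The Jetty "Stopped ..." lines, the "Ending block pool service" warnings, and the interrupted refresh threads above are one datanode being bounced by the test so the WAL pipeline loses a node. A sketch of how a test might do that against a MiniDFSCluster; the exact calls used by TestLogRolling are an assumption, these are just the standard MiniDFSCluster test-framework methods:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class RestartDataNodeSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
        try {
          // Stop datanode 0 but keep its storage directories, then bring it back up.
          // Shutting it down produces the Jetty/BPServiceActor noise seen above.
          MiniDFSCluster.DataNodeProperties dn = cluster.stopDataNode(0);
          cluster.restartDataNode(dn);
          cluster.waitActive();   // wait until the restarted node re-registers with the NameNode
        } finally {
          cluster.shutdown();
        }
      }
    }
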
2024-11-22T18:52:17,054 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T18:52:17,056 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/cluster_6f545e2e-bed3-1cd6-45bc-f784bad987a4/data/data1/current/BP-1703702927-172.17.0.2-1732301523030 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T18:52:17,056 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/cluster_6f545e2e-bed3-1cd6-45bc-f784bad987a4/data/data2/current/BP-1703702927-172.17.0.2-1732301523030 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T18:52:17,056 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T18:52:17,065 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T18:52:17,068 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T18:52:17,069 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T18:52:17,069 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T18:52:17,069 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T18:52:17,069 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11fd78ef{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/hadoop.log.dir/,AVAILABLE} 2024-11-22T18:52:17,070 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@43a7cd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T18:52:17,120 WARN [Thread-1345 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T18:52:17,122 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x45e3e5dc4769f291 with lease ID 0x94f2c80c13fc39c1: from storage DS-a605f1e1-91c5-4bce-9b24-67adf67db55e node DatanodeRegistration(127.0.0.1:42641, datanodeUuid=c0a494c7-167c-424b-9e54-5b31acf57666, infoPort=38839, infoSecurePort=0, ipcPort=45291, storageInfo=lv=-57;cid=testClusterID;nsid=859048353;c=1732301523030), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T18:52:17,123 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x45e3e5dc4769f291 with lease ID 0x94f2c80c13fc39c1: from storage DS-9a3de42e-922d-4c33-bef1-9c8e20d3577b node DatanodeRegistration(127.0.0.1:42641, datanodeUuid=c0a494c7-167c-424b-9e54-5b31acf57666, infoPort=38839, infoSecurePort=0, ipcPort=45291, storageInfo=lv=-57;cid=testClusterID;nsid=859048353;c=1732301523030), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-22T18:52:17,184 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3f4861a6{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/java.io.tmpdir/jetty-localhost-33223-hadoop-hdfs-3_4_1-tests_jar-_-any-7785803948584900577/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T18:52:17,184 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@68a8c2cd{HTTP/1.1, (http/1.1)}{localhost:33223} 2024-11-22T18:52:17,184 INFO [Time-limited test {}] server.Server(415): Started @162573ms 2024-11-22T18:52:17,185 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T18:52:17,270 WARN [Thread-1376 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T18:52:17,273 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfecc6cf584ef74ba with lease ID 0x94f2c80c13fc39c2: from storage DS-647e0fc4-b3cf-4111-b483-31aada013bb6 node DatanodeRegistration(127.0.0.1:40841, datanodeUuid=6573a035-60e5-4acc-9a47-16eae730f01f, infoPort=43417, infoSecurePort=0, ipcPort=40513, storageInfo=lv=-57;cid=testClusterID;nsid=859048353;c=1732301523030), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T18:52:17,273 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfecc6cf584ef74ba with lease ID 0x94f2c80c13fc39c2: from storage DS-748a9062-e5ac-496d-b731-eb20b79fe691 node DatanodeRegistration(127.0.0.1:40841, datanodeUuid=6573a035-60e5-4acc-9a47-16eae730f01f, infoPort=43417, infoSecurePort=0, ipcPort=40513, storageInfo=lv=-57;cid=testClusterID;nsid=859048353;c=1732301523030), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T18:52:17,360 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T18:52:17,364 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:18,203 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-22T18:52:18,206 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-22T18:52:18,207 ERROR [FSHLog-0-hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99-prefix:d79ba0c344fb,38379,1732301523915 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45781,DS-647e0fc4-b3cf-4111-b483-31aada013bb6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T18:52:18,207 WARN [FSHLog-0-hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99-prefix:d79ba0c344fb,38379,1732301523915 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45781,DS-647e0fc4-b3cf-4111-b483-31aada013bb6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:52:18,207 DEBUG [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog d79ba0c344fb%2C38379%2C1732301523915:(num 1732301524312) roll requested 2024-11-22T18:52:18,207 INFO [regionserver/d79ba0c344fb:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor d79ba0c344fb%2C38379%2C1732301523915.1732301538207 2024-11-22T18:52:18,213 DEBUG [regionserver/d79ba0c344fb:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301524312 newFile=hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301538207 2024-11-22T18:52:18,213 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:52:18,213 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:52:18,213 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:52:18,213 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:52:18,213 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:52:18,214 INFO [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301524312 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301538207 2024-11-22T18:52:18,214 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45781,DS-647e0fc4-b3cf-4111-b483-31aada013bb6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
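[Editor's note] The "All datanodes [...] are bad. Aborting..." failures and the forced WAL roll above are the DFS client's pipeline recovery giving up: with only two datanodes in the pipeline there is no replacement node to swap in, so the dfs.client.block.write.replace-datanode-on-failure.* settings decide whether the stream aborts. A sketch of those client-side keys; whether this test tunes them is an assumption, only the key names and values are standard HDFS configuration:

    import org.apache.hadoop.conf.Configuration;

    public class PipelineRecoveryConfSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Do not try to replace a failed datanode (common on tiny 2-node test pipelines).
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");
        // Alternatively keep the DEFAULT policy but continue best-effort when no replacement exists.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
        System.out.println(conf.get("dfs.client.block.write.replace-datanode-on-failure.policy"));
      }
    }
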
2024-11-22T18:52:18,214 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45781,DS-647e0fc4-b3cf-4111-b483-31aada013bb6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:52:18,214 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301524312 2024-11-22T18:52:18,214 DEBUG [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38839:38839),(127.0.0.1/127.0.0.1:43417:43417)] 2024-11-22T18:52:18,215 DEBUG [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301524312 is not closed yet, will try archiving it next time 2024-11-22T18:52:18,215 WARN [IPC Server handler 4 on default port 46651 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301524312 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1014 2024-11-22T18:52:18,215 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301524312 after 1ms 2024-11-22T18:52:18,361 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:18,365 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T18:52:19,122 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1014: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-22T18:52:19,361 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:19,365 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:20,218 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-22T18:52:20,362 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:20,366 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:21,363 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:21,366 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:22,216 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301524312 after 4002ms 2024-11-22T18:52:22,221 WARN [ResponseProcessor for block BP-1703702927-172.17.0.2-1732301523030:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1703702927-172.17.0.2-1732301523030:blk_1073741837_1016 java.io.IOException: Bad response ERROR for BP-1703702927-172.17.0.2-1732301523030:blk_1073741837_1016 from datanode DatanodeInfoWithStorage[127.0.0.1:40841,DS-647e0fc4-b3cf-4111-b483-31aada013bb6,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:52:22,221 WARN [DataStreamer for file /user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301538207 block BP-1703702927-172.17.0.2-1732301523030:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1703702927-172.17.0.2-1732301523030:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42641,DS-a605f1e1-91c5-4bce-9b24-67adf67db55e,DISK], DatanodeInfoWithStorage[127.0.0.1:40841,DS-647e0fc4-b3cf-4111-b483-31aada013bb6,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40841,DS-647e0fc4-b3cf-4111-b483-31aada013bb6,DISK]) is bad. 
2024-11-22T18:52:22,221 WARN [PacketResponder: BP-1703702927-172.17.0.2-1732301523030:blk_1073741837_1016, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:40841] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:52:22,222 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1131236379_22 at /127.0.0.1:56484 [Receiving block BP-1703702927-172.17.0.2-1732301523030:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:42641:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56484 dst: /127.0.0.1:42641 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T18:52:22,222 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1131236379_22 at /127.0.0.1:60892 [Receiving block BP-1703702927-172.17.0.2-1732301523030:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:40841:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60892 dst: /127.0.0.1:40841 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T18:52:22,223 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3f4861a6{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T18:52:22,223 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@68a8c2cd{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T18:52:22,224 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T18:52:22,224 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@43a7cd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T18:52:22,224 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11fd78ef{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/hadoop.log.dir/,STOPPED} 2024-11-22T18:52:22,225 WARN [BP-1703702927-172.17.0.2-1732301523030 heartbeating to localhost/127.0.0.1:46651 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T18:52:22,225 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-22T18:52:22,225 WARN [BP-1703702927-172.17.0.2-1732301523030 heartbeating to localhost/127.0.0.1:46651 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1703702927-172.17.0.2-1732301523030 (Datanode Uuid 6573a035-60e5-4acc-9a47-16eae730f01f) service to localhost/127.0.0.1:46651 2024-11-22T18:52:22,226 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T18:52:22,226 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/cluster_6f545e2e-bed3-1cd6-45bc-f784bad987a4/data/data1/current/BP-1703702927-172.17.0.2-1732301523030 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T18:52:22,226 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/cluster_6f545e2e-bed3-1cd6-45bc-f784bad987a4/data/data2/current/BP-1703702927-172.17.0.2-1732301523030 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T18:52:22,227 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T18:52:22,240 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T18:52:22,243 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T18:52:22,244 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T18:52:22,244 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T18:52:22,244 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T18:52:22,244 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3f8a0d0d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/hadoop.log.dir/,AVAILABLE} 2024-11-22T18:52:22,245 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@57d5f4b3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T18:52:22,358 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@67d70e61{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/java.io.tmpdir/jetty-localhost-37891-hadoop-hdfs-3_4_1-tests_jar-_-any-2604976360061506768/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T18:52:22,358 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5f1a012{HTTP/1.1, (http/1.1)}{localhost:37891} 2024-11-22T18:52:22,358 INFO [Time-limited test {}] server.Server(415): Started @167748ms 2024-11-22T18:52:22,360 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T18:52:22,363 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:22,367 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T18:52:22,377 WARN [ResponseProcessor for block BP-1703702927-172.17.0.2-1732301523030:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1703702927-172.17.0.2-1732301523030:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:52:22,378 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1131236379_22 at /127.0.0.1:55392 [Receiving block BP-1703702927-172.17.0.2-1732301523030:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:42641:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55392 dst: /127.0.0.1:42641 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T18:52:22,381 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@12f241e2{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T18:52:22,381 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2cce6536{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T18:52:22,381 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T18:52:22,382 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@36974255{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T18:52:22,382 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4136ef12{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/hadoop.log.dir/,STOPPED} 2024-11-22T18:52:22,383 WARN [BP-1703702927-172.17.0.2-1732301523030 heartbeating to localhost/127.0.0.1:46651 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T18:52:22,383 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-22T18:52:22,383 WARN [BP-1703702927-172.17.0.2-1732301523030 heartbeating to localhost/127.0.0.1:46651 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1703702927-172.17.0.2-1732301523030 (Datanode Uuid c0a494c7-167c-424b-9e54-5b31acf57666) service to localhost/127.0.0.1:46651 2024-11-22T18:52:22,383 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T18:52:22,385 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/cluster_6f545e2e-bed3-1cd6-45bc-f784bad987a4/data/data3/current/BP-1703702927-172.17.0.2-1732301523030 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T18:52:22,385 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/cluster_6f545e2e-bed3-1cd6-45bc-f784bad987a4/data/data4/current/BP-1703702927-172.17.0.2-1732301523030 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T18:52:22,385 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T18:52:22,395 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T18:52:22,399 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T18:52:22,399 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T18:52:22,399 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T18:52:22,399 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T18:52:22,400 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@49f94f8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/hadoop.log.dir/,AVAILABLE} 2024-11-22T18:52:22,400 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@64685bd7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T18:52:22,448 WARN [Thread-1419 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-22T18:52:22,450 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x87c80c7618c443ef with lease ID 0x94f2c80c13fc39c3: from storage DS-647e0fc4-b3cf-4111-b483-31aada013bb6 node DatanodeRegistration(127.0.0.1:35359, datanodeUuid=6573a035-60e5-4acc-9a47-16eae730f01f, infoPort=38643, infoSecurePort=0, ipcPort=43957, storageInfo=lv=-57;cid=testClusterID;nsid=859048353;c=1732301523030), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T18:52:22,450 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x87c80c7618c443ef with lease ID 0x94f2c80c13fc39c3: from storage DS-748a9062-e5ac-496d-b731-eb20b79fe691 node DatanodeRegistration(127.0.0.1:35359, datanodeUuid=6573a035-60e5-4acc-9a47-16eae730f01f, infoPort=38643, infoSecurePort=0, ipcPort=43957, storageInfo=lv=-57;cid=testClusterID;nsid=859048353;c=1732301523030), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T18:52:22,517 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@652ca842{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/java.io.tmpdir/jetty-localhost-34493-hadoop-hdfs-3_4_1-tests_jar-_-any-413003721054629698/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T18:52:22,518 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5d802677{HTTP/1.1, (http/1.1)}{localhost:34493} 2024-11-22T18:52:22,518 INFO [Time-limited test {}] server.Server(415): Started @167907ms 2024-11-22T18:52:22,519 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-22T18:52:22,610 WARN [Thread-1450 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-22T18:52:22,612 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb838d9d0703395e9 with lease ID 0x94f2c80c13fc39c4: from storage DS-a605f1e1-91c5-4bce-9b24-67adf67db55e node DatanodeRegistration(127.0.0.1:45249, datanodeUuid=c0a494c7-167c-424b-9e54-5b31acf57666, infoPort=37887, infoSecurePort=0, ipcPort=35385, storageInfo=lv=-57;cid=testClusterID;nsid=859048353;c=1732301523030), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T18:52:22,612 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb838d9d0703395e9 with lease ID 0x94f2c80c13fc39c4: from storage DS-9a3de42e-922d-4c33-bef1-9c8e20d3577b node DatanodeRegistration(127.0.0.1:45249, datanodeUuid=c0a494c7-167c-424b-9e54-5b31acf57666, infoPort=37887, infoSecurePort=0, ipcPort=35385, storageInfo=lv=-57;cid=testClusterID;nsid=859048353;c=1732301523030), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T18:52:23,364 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T18:52:23,368 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:23,542 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-22T18:52:23,544 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-22T18:52:23,545 ERROR [FSHLog-0-hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99-prefix:d79ba0c344fb,38379,1732301523915 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42641,DS-a605f1e1-91c5-4bce-9b24-67adf67db55e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T18:52:23,545 WARN [FSHLog-0-hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99-prefix:d79ba0c344fb,38379,1732301523915 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42641,DS-a605f1e1-91c5-4bce-9b24-67adf67db55e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:52:23,545 DEBUG [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog d79ba0c344fb%2C38379%2C1732301523915:(num 1732301538207) roll requested 2024-11-22T18:52:23,546 INFO [regionserver/d79ba0c344fb:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor d79ba0c344fb%2C38379%2C1732301523915.1732301543545 2024-11-22T18:52:23,551 DEBUG [regionserver/d79ba0c344fb:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301538207 newFile=hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301543545 2024-11-22T18:52:23,551 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:52:23,551 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:52:23,551 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:52:23,551 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:52:23,551 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:52:23,551 INFO [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301538207 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301543545 2024-11-22T18:52:23,552 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42641,DS-a605f1e1-91c5-4bce-9b24-67adf67db55e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T18:52:23,552 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42641,DS-a605f1e1-91c5-4bce-9b24-67adf67db55e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:52:23,552 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301538207 2024-11-22T18:52:23,552 DEBUG [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38643:38643),(127.0.0.1/127.0.0.1:37887:37887)] 2024-11-22T18:52:23,552 DEBUG [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301538207 is not closed yet, will try archiving it next time 2024-11-22T18:52:23,552 WARN [IPC Server handler 2 on default port 46651 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301538207 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-22T18:52:23,552 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301538207 after 0ms 2024-11-22T18:52:24,364 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:24,368 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T18:52:24,451 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-22T18:52:25,365 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:25,368 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T18:52:25,554 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor d79ba0c344fb%2C38379%2C1732301523915.1732301545553 2024-11-22T18:52:25,559 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301543545 newFile=hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301545553 2024-11-22T18:52:25,559 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:52:25,559 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:52:25,559 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:52:25,559 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:52:25,559 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:52:25,560 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301543545 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301545553 2024-11-22T18:52:25,561 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38643:38643),(127.0.0.1/127.0.0.1:37887:37887)] 2024-11-22T18:52:25,561 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301538207 is not closed yet, will try archiving it next time 2024-11-22T18:52:25,561 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301543545 is not closed yet, will try archiving it next time 2024-11-22T18:52:25,561 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301524312 2024-11-22T18:52:25,561 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301524312 2024-11-22T18:52:25,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45249 is added to blk_1073741838_1019 (size=1264) 2024-11-22T18:52:25,562 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301524312 after 1ms 2024-11-22T18:52:25,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35359 is added to blk_1073741838_1019 (size=1264) 2024-11-22T18:52:25,562 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL 
/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301524312 2024-11-22T18:52:25,562 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301538207 is not closed yet, will try archiving it next time 2024-11-22T18:52:25,573 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1732301525199/Put/vlen=218/seqid=0] 2024-11-22T18:52:25,574 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1732301534885/Put/vlen=1045/seqid=0] 2024-11-22T18:52:25,574 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301524312 2024-11-22T18:52:25,574 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301538207 2024-11-22T18:52:25,574 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301538207 2024-11-22T18:52:25,574 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301538207 after 0ms 2024-11-22T18:52:25,574 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301538207 2024-11-22T18:52:25,577 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1732301538207/Put/vlen=1045/seqid=0] 2024-11-22T18:52:25,578 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1732301540219/Put/vlen=1045/seqid=0] 2024-11-22T18:52:25,578 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301538207 2024-11-22T18:52:25,578 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301543545 2024-11-22T18:52:25,578 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301543545 2024-11-22T18:52:25,578 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301543545 after 0ms 2024-11-22T18:52:25,578 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL 
/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301543545 2024-11-22T18:52:25,581 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1732301543545/Put/vlen=1045/seqid=0] 2024-11-22T18:52:25,581 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301545553 2024-11-22T18:52:25,581 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301545553 2024-11-22T18:52:25,581 WARN [IPC Server handler 4 on default port 46651 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301545553 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-22T18:52:25,581 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301545553 after 0ms 2024-11-22T18:52:26,366 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:26,369 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:26,451 WARN [ResponseProcessor for block BP-1703702927-172.17.0.2-1732301523030:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1703702927-172.17.0.2-1732301523030:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T18:52:26,451 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-165601234_22 at /127.0.0.1:59028 [Receiving block BP-1703702927-172.17.0.2-1732301523030:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:35359:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59028 dst: /127.0.0.1:35359 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:35359 remote=/127.0.0.1:59028]. Total timeout mills is 60000, 59108 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:52:26,451 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-165601234_22 at /127.0.0.1:53862 [Receiving block BP-1703702927-172.17.0.2-1732301523030:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:45249:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53862 dst: /127.0.0.1:45249 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:52:26,451 WARN [DataStreamer for file /user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301545553 block BP-1703702927-172.17.0.2-1732301523030:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1703702927-172.17.0.2-1732301523030:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35359,DS-647e0fc4-b3cf-4111-b483-31aada013bb6,DISK], DatanodeInfoWithStorage[127.0.0.1:45249,DS-a605f1e1-91c5-4bce-9b24-67adf67db55e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35359,DS-647e0fc4-b3cf-4111-b483-31aada013bb6,DISK]) is bad. 2024-11-22T18:52:26,452 WARN [DataStreamer for file /user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301545553 block BP-1703702927-172.17.0.2-1732301523030:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1703702927-172.17.0.2-1732301523030:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:52:26,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35359 is added to blk_1073741839_1022 (size=85) 2024-11-22T18:52:26,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45249 is added to blk_1073741839_1022 (size=85) 2024-11-22T18:52:27,366 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:27,370 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:27,553 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301538207 after 4001ms 2024-11-22T18:52:28,367 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:28,370 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:29,368 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:29,371 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:29,582 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301545553 after 4001ms 2024-11-22T18:52:29,582 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301545553 2024-11-22T18:52:29,586 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301545553 2024-11-22T18:52:29,586 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 87d631ef66d1eba8cb24b6bf7126a2f0 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-22T18:52:29,587 ERROR [FSHLog-0-hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99-prefix:d79ba0c344fb,38379,1732301523915 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1703702927-172.17.0.2-1732301523030:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T18:52:29,587 WARN [FSHLog-0-hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99-prefix:d79ba0c344fb,38379,1732301523915 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1703702927-172.17.0.2-1732301523030:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T18:52:29,587 DEBUG [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog d79ba0c344fb%2C38379%2C1732301523915:(num 1732301545553) roll requested 2024-11-22T18:52:29,588 INFO [regionserver/d79ba0c344fb:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor d79ba0c344fb%2C38379%2C1732301523915.1732301549587 2024-11-22T18:52:29,593 DEBUG [regionserver/d79ba0c344fb:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301545553 newFile=hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301549587 2024-11-22T18:52:29,593 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:52:29,593 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:52:29,593 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:52:29,593 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:52:29,593 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:52:29,593 INFO [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301545553 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301549587 2024-11-22T18:52:29,593 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1703702927-172.17.0.2-1732301523030:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:52:29,594 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1703702927-172.17.0.2-1732301523030:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] 
at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:52:29,594 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301545553 2024-11-22T18:52:29,594 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301545553 after 0ms 2024-11-22T18:52:29,599 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.1732301545553 to hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/oldWALs/d79ba0c344fb%2C38379%2C1732301523915.1732301545553 2024-11-22T18:52:29,600 DEBUG [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37887:37887),(127.0.0.1/127.0.0.1:38643:38643)] 2024-11-22T18:52:29,616 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/data/default/TestLogRolling-testLogRollOnPipelineRestart/87d631ef66d1eba8cb24b6bf7126a2f0/.tmp/info/85ec7a0ba5ee49a4a5405bcab271be2b is 1080, key is row1002/info:/1732301534885/Put/seqid=0 2024-11-22T18:52:29,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35359 is added to blk_1073741841_1024 (size=9270) 2024-11-22T18:52:29,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45249 is added to blk_1073741841_1024 (size=9270) 2024-11-22T18:52:29,622 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/data/default/TestLogRolling-testLogRollOnPipelineRestart/87d631ef66d1eba8cb24b6bf7126a2f0/.tmp/info/85ec7a0ba5ee49a4a5405bcab271be2b 2024-11-22T18:52:29,628 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/data/default/TestLogRolling-testLogRollOnPipelineRestart/87d631ef66d1eba8cb24b6bf7126a2f0/.tmp/info/85ec7a0ba5ee49a4a5405bcab271be2b as hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/data/default/TestLogRolling-testLogRollOnPipelineRestart/87d631ef66d1eba8cb24b6bf7126a2f0/info/85ec7a0ba5ee49a4a5405bcab271be2b 2024-11-22T18:52:29,633 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/data/default/TestLogRolling-testLogRollOnPipelineRestart/87d631ef66d1eba8cb24b6bf7126a2f0/info/85ec7a0ba5ee49a4a5405bcab271be2b, entries=4, sequenceid=8, filesize=9.1 K 2024-11-22T18:52:29,635 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 87d631ef66d1eba8cb24b6bf7126a2f0 in 48ms, sequenceid=8, compaction requested=false 2024-11-22T18:52:29,635 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 87d631ef66d1eba8cb24b6bf7126a2f0: 2024-11-22T18:52:29,635 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-22T18:52:29,635 ERROR [FSHLog-0-hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99-prefix:d79ba0c344fb,38379,1732301523915.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45781,DS-647e0fc4-b3cf-4111-b483-31aada013bb6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:52:29,635 WARN [FSHLog-0-hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99-prefix:d79ba0c344fb,38379,1732301523915.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45781,DS-647e0fc4-b3cf-4111-b483-31aada013bb6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T18:52:29,635 DEBUG [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog d79ba0c344fb%2C38379%2C1732301523915.meta:.meta(num 1732301524708) roll requested 2024-11-22T18:52:29,636 INFO [regionserver/d79ba0c344fb:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor d79ba0c344fb%2C38379%2C1732301523915.meta.1732301549636.meta 2024-11-22T18:52:29,640 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:52:29,640 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:52:29,641 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:52:29,641 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:52:29,641 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:52:29,641 INFO [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.meta.1732301524708.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.meta.1732301549636.meta 2024-11-22T18:52:29,641 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45781,DS-647e0fc4-b3cf-4111-b483-31aada013bb6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:52:29,641 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45781,DS-647e0fc4-b3cf-4111-b483-31aada013bb6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T18:52:29,641 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.meta.1732301524708.meta 2024-11-22T18:52:29,642 WARN [IPC Server handler 4 on default port 46651 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.meta.1732301524708.meta has not been closed. Lease recovery is in progress. RecoveryId = 1026 for block blk_1073741834_1013 2024-11-22T18:52:29,642 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.meta.1732301524708.meta after 1ms 2024-11-22T18:52:29,644 DEBUG [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38643:38643),(127.0.0.1/127.0.0.1:37887:37887)] 2024-11-22T18:52:29,644 DEBUG [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.meta.1732301524708.meta is not closed yet, will try archiving it next time 2024-11-22T18:52:29,659 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/data/hbase/meta/1588230740/.tmp/info/bbd008d80a384b4c89278b59d9422234 is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1732301524845.87d631ef66d1eba8cb24b6bf7126a2f0./info:regioninfo/1732301525203/Put/seqid=0 2024-11-22T18:52:29,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45249 is added to blk_1073741843_1027 (size=7125) 2024-11-22T18:52:29,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35359 is added to blk_1073741843_1027 (size=7125) 2024-11-22T18:52:29,665 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/data/hbase/meta/1588230740/.tmp/info/bbd008d80a384b4c89278b59d9422234 2024-11-22T18:52:29,684 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/data/hbase/meta/1588230740/.tmp/ns/12d8af8478104c2c8cac74200f6c9815 is 43, key is default/ns:d/1732301524757/Put/seqid=0 2024-11-22T18:52:29,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35359 is added to blk_1073741844_1028 (size=5153) 2024-11-22T18:52:29,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45249 is added to blk_1073741844_1028 (size=5153) 2024-11-22T18:52:29,689 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/data/hbase/meta/1588230740/.tmp/ns/12d8af8478104c2c8cac74200f6c9815 2024-11-22T18:52:29,716 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/data/hbase/meta/1588230740/.tmp/table/d7803f12038d4bd7a33f77a6c1c6f936 is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1732301525214/Put/seqid=0 2024-11-22T18:52:29,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35359 is added to blk_1073741845_1029 (size=5438) 2024-11-22T18:52:29,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45249 is added to blk_1073741845_1029 (size=5438) 2024-11-22T18:52:29,721 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/data/hbase/meta/1588230740/.tmp/table/d7803f12038d4bd7a33f77a6c1c6f936 2024-11-22T18:52:29,727 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/data/hbase/meta/1588230740/.tmp/info/bbd008d80a384b4c89278b59d9422234 as hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/data/hbase/meta/1588230740/info/bbd008d80a384b4c89278b59d9422234 2024-11-22T18:52:29,731 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/data/hbase/meta/1588230740/info/bbd008d80a384b4c89278b59d9422234, entries=10, sequenceid=11, filesize=7.0 K 2024-11-22T18:52:29,732 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/data/hbase/meta/1588230740/.tmp/ns/12d8af8478104c2c8cac74200f6c9815 as hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/data/hbase/meta/1588230740/ns/12d8af8478104c2c8cac74200f6c9815 2024-11-22T18:52:29,738 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/data/hbase/meta/1588230740/ns/12d8af8478104c2c8cac74200f6c9815, entries=2, sequenceid=11, filesize=5.0 K 2024-11-22T18:52:29,739 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/data/hbase/meta/1588230740/.tmp/table/d7803f12038d4bd7a33f77a6c1c6f936 as hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/data/hbase/meta/1588230740/table/d7803f12038d4bd7a33f77a6c1c6f936 2024-11-22T18:52:29,744 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/data/hbase/meta/1588230740/table/d7803f12038d4bd7a33f77a6c1c6f936, entries=2, sequenceid=11, filesize=5.3 K 2024-11-22T18:52:29,745 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 110ms, sequenceid=11, compaction requested=false 2024-11-22T18:52:29,745 DEBUG [Time-limited test {}] 
regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-22T18:52:29,750 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-22T18:52:29,751 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-22T18:52:29,751 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T18:52:29,751 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T18:52:29,751 DEBUG [Time-limited test {}] 
ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T18:52:29,751 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-22T18:52:29,751 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-22T18:52:29,751 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1832832606, stopped=false 2024-11-22T18:52:29,751 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=d79ba0c344fb,32959,1732301523867 2024-11-22T18:52:29,753 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32959-0x1014105761c0000, quorum=127.0.0.1:51025, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T18:52:29,753 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38379-0x1014105761c0001, quorum=127.0.0.1:51025, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T18:52:29,753 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32959-0x1014105761c0000, quorum=127.0.0.1:51025, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:52:29,753 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T18:52:29,753 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38379-0x1014105761c0001, quorum=127.0.0.1:51025, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:52:29,753 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-22T18:52:29,753 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T18:52:29,753 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T18:52:29,754 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server 'd79ba0c344fb,38379,1732301523915' ***** 2024-11-22T18:52:29,754 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-22T18:52:29,754 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:32959-0x1014105761c0000, quorum=127.0.0.1:51025, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T18:52:29,754 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38379-0x1014105761c0001, quorum=127.0.0.1:51025, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T18:52:29,754 INFO [RS:0;d79ba0c344fb:38379 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-22T18:52:29,755 INFO [RS:0;d79ba0c344fb:38379 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-22T18:52:29,755 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-22T18:52:29,755 INFO [RS:0;d79ba0c344fb:38379 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-22T18:52:29,755 INFO [RS:0;d79ba0c344fb:38379 {}] regionserver.HRegionServer(3091): Received CLOSE for 87d631ef66d1eba8cb24b6bf7126a2f0 2024-11-22T18:52:29,755 INFO [RS:0;d79ba0c344fb:38379 {}] regionserver.HRegionServer(959): stopping server d79ba0c344fb,38379,1732301523915 2024-11-22T18:52:29,755 INFO [RS:0;d79ba0c344fb:38379 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T18:52:29,755 INFO [RS:0;d79ba0c344fb:38379 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;d79ba0c344fb:38379. 2024-11-22T18:52:29,755 DEBUG [RS:0;d79ba0c344fb:38379 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T18:52:29,755 DEBUG [RS:0;d79ba0c344fb:38379 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T18:52:29,755 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 87d631ef66d1eba8cb24b6bf7126a2f0, disabling compactions & flushes 2024-11-22T18:52:29,755 INFO [RS:0;d79ba0c344fb:38379 {}] 
regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-22T18:52:29,755 INFO [RS:0;d79ba0c344fb:38379 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-22T18:52:29,755 INFO [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1732301524845.87d631ef66d1eba8cb24b6bf7126a2f0. 2024-11-22T18:52:29,755 INFO [RS:0;d79ba0c344fb:38379 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-22T18:52:29,755 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732301524845.87d631ef66d1eba8cb24b6bf7126a2f0. 2024-11-22T18:52:29,755 INFO [RS:0;d79ba0c344fb:38379 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-22T18:52:29,755 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732301524845.87d631ef66d1eba8cb24b6bf7126a2f0. after waiting 0 ms 2024-11-22T18:52:29,755 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1732301524845.87d631ef66d1eba8cb24b6bf7126a2f0. 2024-11-22T18:52:29,755 INFO [RS:0;d79ba0c344fb:38379 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-22T18:52:29,755 DEBUG [RS:0;d79ba0c344fb:38379 {}] regionserver.HRegionServer(1325): Online Regions={87d631ef66d1eba8cb24b6bf7126a2f0=TestLogRolling-testLogRollOnPipelineRestart,,1732301524845.87d631ef66d1eba8cb24b6bf7126a2f0., 1588230740=hbase:meta,,1.1588230740} 2024-11-22T18:52:29,755 DEBUG [RS:0;d79ba0c344fb:38379 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 87d631ef66d1eba8cb24b6bf7126a2f0 2024-11-22T18:52:29,756 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T18:52:29,756 INFO [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T18:52:29,756 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T18:52:29,756 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T18:52:29,756 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T18:52:29,760 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/data/default/TestLogRolling-testLogRollOnPipelineRestart/87d631ef66d1eba8cb24b6bf7126a2f0/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-22T18:52:29,760 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-22T18:52:29,761 INFO [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732301524845.87d631ef66d1eba8cb24b6bf7126a2f0. 2024-11-22T18:52:29,761 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T18:52:29,761 INFO [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T18:52:29,761 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 87d631ef66d1eba8cb24b6bf7126a2f0: Waiting for close lock at 1732301549755Running coprocessor pre-close hooks at 1732301549755Disabling compacts and flushes for region at 1732301549755Disabling writes for close at 1732301549755Writing region close event to WAL at 1732301549756 (+1 ms)Running coprocessor post-close hooks at 1732301549761 (+5 ms)Closed at 1732301549761 2024-11-22T18:52:29,761 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732301549755Running coprocessor pre-close hooks at 1732301549755Disabling compacts and flushes for region at 1732301549755Disabling writes for close at 1732301549756 (+1 ms)Writing region close event to WAL at 1732301549757 (+1 ms)Running coprocessor post-close hooks at 1732301549761 (+4 ms)Closed at 1732301549761 2024-11-22T18:52:29,761 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-22T18:52:29,761 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732301524845.87d631ef66d1eba8cb24b6bf7126a2f0. 2024-11-22T18:52:29,873 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T18:52:29,873 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-22T18:52:29,874 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-22T18:52:29,956 INFO [RS:0;d79ba0c344fb:38379 {}] regionserver.HRegionServer(976): stopping server d79ba0c344fb,38379,1732301523915; all regions closed. 
2024-11-22T18:52:29,956 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:52:29,956 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:52:29,956 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:52:29,956 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:52:29,957 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:52:29,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45249 is added to blk_1073741842_1025 (size=825) 2024-11-22T18:52:29,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35359 is added to blk_1073741842_1025 (size=825) 2024-11-22T18:52:30,177 INFO [regionserver/d79ba0c344fb:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T18:52:30,203 INFO [regionserver/d79ba0c344fb:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-22T18:52:30,203 INFO [regionserver/d79ba0c344fb:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-22T18:52:30,368 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T18:52:30,371 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:31,369 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:31,372 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T18:52:32,369 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:32,372 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:33,370 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T18:52:33,373 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:33,612 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1013: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-22T18:52:33,643 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.meta.1732301524708.meta after 4002ms 2024-11-22T18:52:33,643 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/WALs/d79ba0c344fb,38379,1732301523915/d79ba0c344fb%2C38379%2C1732301523915.meta.1732301524708.meta to hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/oldWALs/d79ba0c344fb%2C38379%2C1732301523915.meta.1732301524708.meta 2024-11-22T18:52:33,647 DEBUG [RS:0;d79ba0c344fb:38379 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/oldWALs 2024-11-22T18:52:33,647 INFO [RS:0;d79ba0c344fb:38379 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog d79ba0c344fb%2C38379%2C1732301523915.meta:.meta(num 1732301549636) 2024-11-22T18:52:33,647 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:52:33,648 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:52:33,648 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:52:33,648 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:52:33,648 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:52:33,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35359 is added to blk_1073741840_1023 (size=1162) 2024-11-22T18:52:33,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45249 is added to blk_1073741840_1023 (size=1162) 2024-11-22T18:52:33,850 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
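The Close-WAL-Writer entries above show lease recovery being retried roughly once per second until the NameNode reports the WAL file closed ("Recovered lease, attempt=1 ... after 4002ms"), with isFileClosed() failing once the DFSClient has already been shut down ("Filesystem closed"). The following is a minimal illustrative sketch of that polling pattern, using only the public DistributedFileSystem.recoverLease()/isFileClosed() calls named in the stack traces; the NameNode URI, WAL path, pause, and timeout values are assumptions for the example, not HBase's RecoverLeaseFSUtils defaults.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

import java.net.URI;

/**
 * Illustrative sketch only: poll recoverLease()/isFileClosed() until the
 * NameNode reports the file closed, similar in spirit to the
 * RecoverLeaseFSUtils retries logged above. Pause and timeout values are
 * assumptions for the example, not HBase defaults.
 */
public class LeaseRecoveryPollSketch {

  static boolean recoverLeaseWithRetries(DistributedFileSystem dfs, Path file,
      long timeoutMs, long pauseMs) throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    int attempt = 0;
    while (System.currentTimeMillis() < deadline) {
      // recoverLease() returns true once the previous writer's lease is
      // released and the file is closed on the NameNode.
      if (dfs.recoverLease(file)) {
        System.out.println("Recovered lease, attempt=" + attempt + " on file=" + file);
        return true;
      }
      // isFileClosed() is the call that throws "Filesystem closed" in the
      // traces above when the underlying DFSClient was already shut down.
      if (dfs.isFileClosed(file)) {
        return true;
      }
      attempt++;
      Thread.sleep(pauseMs);
    }
    return false;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical NameNode address and WAL path, for illustration only.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), conf);
    Path wal = new Path("/hbase/WALs/example-server/example.wal");
    if (fs instanceof DistributedFileSystem) {
      boolean ok = recoverLeaseWithRetries((DistributedFileSystem) fs, wal,
          900_000L, 1_000L);
      System.out.println("recovered=" + ok);
    }
  }
}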
2024-11-22T18:52:34,059 DEBUG [RS:0;d79ba0c344fb:38379 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/oldWALs 2024-11-22T18:52:34,060 INFO [RS:0;d79ba0c344fb:38379 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog d79ba0c344fb%2C38379%2C1732301523915:(num 1732301549587) 2024-11-22T18:52:34,060 DEBUG [RS:0;d79ba0c344fb:38379 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T18:52:34,060 INFO [RS:0;d79ba0c344fb:38379 {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T18:52:34,060 INFO [RS:0;d79ba0c344fb:38379 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T18:52:34,060 INFO [RS:0;d79ba0c344fb:38379 {}] hbase.ChoreService(370): Chore service for: regionserver/d79ba0c344fb:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-22T18:52:34,060 INFO [RS:0;d79ba0c344fb:38379 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T18:52:34,060 INFO [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-22T18:52:34,060 INFO [RS:0;d79ba0c344fb:38379 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38379 2024-11-22T18:52:34,062 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38379-0x1014105761c0001, quorum=127.0.0.1:51025, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/d79ba0c344fb,38379,1732301523915 2024-11-22T18:52:34,062 INFO [RS:0;d79ba0c344fb:38379 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T18:52:34,062 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32959-0x1014105761c0000, quorum=127.0.0.1:51025, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T18:52:34,064 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [d79ba0c344fb,38379,1732301523915] 2024-11-22T18:52:34,066 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/d79ba0c344fb,38379,1732301523915 already deleted, retry=false 2024-11-22T18:52:34,066 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; d79ba0c344fb,38379,1732301523915 expired; onlineServers=0 2024-11-22T18:52:34,066 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'd79ba0c344fb,32959,1732301523867' ***** 2024-11-22T18:52:34,066 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-22T18:52:34,066 INFO [M:0;d79ba0c344fb:32959 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T18:52:34,066 INFO [M:0;d79ba0c344fb:32959 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T18:52:34,066 DEBUG [M:0;d79ba0c344fb:32959 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-22T18:52:34,066 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-22T18:52:34,066 DEBUG [M:0;d79ba0c344fb:32959 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-22T18:52:34,066 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster-HFileCleaner.large.0-1732301524093 {}] cleaner.HFileCleaner(306): Exit Thread[master/d79ba0c344fb:0:becomeActiveMaster-HFileCleaner.large.0-1732301524093,5,FailOnTimeoutGroup] 2024-11-22T18:52:34,067 INFO [M:0;d79ba0c344fb:32959 {}] hbase.ChoreService(370): Chore service for: master/d79ba0c344fb:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-22T18:52:34,067 INFO [M:0;d79ba0c344fb:32959 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T18:52:34,067 DEBUG [M:0;d79ba0c344fb:32959 {}] master.HMaster(1795): Stopping service threads 2024-11-22T18:52:34,067 INFO [M:0;d79ba0c344fb:32959 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-22T18:52:34,067 INFO [M:0;d79ba0c344fb:32959 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T18:52:34,067 INFO [M:0;d79ba0c344fb:32959 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-22T18:52:34,067 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-22T18:52:34,067 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster-HFileCleaner.small.0-1732301524094 {}] cleaner.HFileCleaner(306): Exit Thread[master/d79ba0c344fb:0:becomeActiveMaster-HFileCleaner.small.0-1732301524094,5,FailOnTimeoutGroup] 2024-11-22T18:52:34,070 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32959-0x1014105761c0000, quorum=127.0.0.1:51025, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-22T18:52:34,070 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32959-0x1014105761c0000, quorum=127.0.0.1:51025, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:52:34,070 DEBUG [M:0;d79ba0c344fb:32959 {}] zookeeper.ZKUtil(347): master:32959-0x1014105761c0000, quorum=127.0.0.1:51025, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-22T18:52:34,070 WARN [M:0;d79ba0c344fb:32959 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-22T18:52:34,070 INFO [M:0;d79ba0c344fb:32959 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/.lastflushedseqids 2024-11-22T18:52:34,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45249 is added to blk_1073741846_1030 (size=130) 2024-11-22T18:52:34,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35359 is added to blk_1073741846_1030 (size=130) 2024-11-22T18:52:34,092 INFO [M:0;d79ba0c344fb:32959 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-22T18:52:34,092 INFO [M:0;d79ba0c344fb:32959 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-22T18:52:34,093 DEBUG [M:0;d79ba0c344fb:32959 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T18:52:34,093 INFO [M:0;d79ba0c344fb:32959 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T18:52:34,093 DEBUG [M:0;d79ba0c344fb:32959 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T18:52:34,093 DEBUG [M:0;d79ba0c344fb:32959 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T18:52:34,093 DEBUG [M:0;d79ba0c344fb:32959 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T18:52:34,093 INFO [M:0;d79ba0c344fb:32959 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.18 KB heapSize=29.16 KB 2024-11-22T18:52:34,094 ERROR [FSHLog-0-hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/MasterData-prefix:d79ba0c344fb,32959,1732301523867 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45781,DS-647e0fc4-b3cf-4111-b483-31aada013bb6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:52:34,094 WARN [FSHLog-0-hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/MasterData-prefix:d79ba0c344fb,32959,1732301523867 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45781,DS-647e0fc4-b3cf-4111-b483-31aada013bb6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T18:52:34,094 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog d79ba0c344fb%2C32959%2C1732301523867:(num 1732301524004) roll requested 2024-11-22T18:52:34,094 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor d79ba0c344fb%2C32959%2C1732301523867.1732301554094 2024-11-22T18:52:34,109 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:52:34,109 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:52:34,109 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:52:34,109 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:52:34,110 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:52:34,110 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/MasterData/WALs/d79ba0c344fb,32959,1732301523867/d79ba0c344fb%2C32959%2C1732301523867.1732301524004 with entries=53, filesize=26.63 KB; new WAL /user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/MasterData/WALs/d79ba0c344fb,32959,1732301523867/d79ba0c344fb%2C32959%2C1732301523867.1732301554094 2024-11-22T18:52:34,111 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45781,DS-647e0fc4-b3cf-4111-b483-31aada013bb6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:52:34,111 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45781,DS-647e0fc4-b3cf-4111-b483-31aada013bb6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T18:52:34,111 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/MasterData/WALs/d79ba0c344fb,32959,1732301523867/d79ba0c344fb%2C32959%2C1732301523867.1732301524004 2024-11-22T18:52:34,112 WARN [IPC Server handler 2 on default port 46651 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/MasterData/WALs/d79ba0c344fb,32959,1732301523867/d79ba0c344fb%2C32959%2C1732301523867.1732301524004 has not been closed. 
Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1015 2024-11-22T18:52:34,112 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/MasterData/WALs/d79ba0c344fb,32959,1732301523867/d79ba0c344fb%2C32959%2C1732301523867.1732301524004 after 1ms 2024-11-22T18:52:34,113 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38643:38643),(127.0.0.1/127.0.0.1:37887:37887)] 2024-11-22T18:52:34,113 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/MasterData/WALs/d79ba0c344fb,32959,1732301523867/d79ba0c344fb%2C32959%2C1732301523867.1732301524004 is not closed yet, will try archiving it next time 2024-11-22T18:52:34,132 DEBUG [M:0;d79ba0c344fb:32959 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a0b83da3877d434596359bdd67e1cc00 is 82, key is hbase:meta,,1/info:regioninfo/1732301524739/Put/seqid=0 2024-11-22T18:52:34,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45249 is added to blk_1073741848_1033 (size=5672) 2024-11-22T18:52:34,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35359 is added to blk_1073741848_1033 (size=5672) 2024-11-22T18:52:34,137 INFO [M:0;d79ba0c344fb:32959 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a0b83da3877d434596359bdd67e1cc00 2024-11-22T18:52:34,158 DEBUG [M:0;d79ba0c344fb:32959 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/094c7ba94a1c4519bd1443847d882c54 is 779, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732301525218/Put/seqid=0 2024-11-22T18:52:34,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45249 is added to blk_1073741849_1034 (size=6119) 2024-11-22T18:52:34,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35359 is added to blk_1073741849_1034 (size=6119) 2024-11-22T18:52:34,163 INFO [M:0;d79ba0c344fb:32959 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/094c7ba94a1c4519bd1443847d882c54 2024-11-22T18:52:34,164 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38379-0x1014105761c0001, quorum=127.0.0.1:51025, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T18:52:34,164 INFO [RS:0;d79ba0c344fb:38379 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T18:52:34,164 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38379-0x1014105761c0001, 
quorum=127.0.0.1:51025, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T18:52:34,165 INFO [RS:0;d79ba0c344fb:38379 {}] regionserver.HRegionServer(1031): Exiting; stopping=d79ba0c344fb,38379,1732301523915; zookeeper connection closed. 2024-11-22T18:52:34,165 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@40e2e903 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@40e2e903 2024-11-22T18:52:34,165 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-22T18:52:34,190 DEBUG [M:0;d79ba0c344fb:32959 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/991dd7834695411ea4f3266860112cee is 69, key is d79ba0c344fb,38379,1732301523915/rs:state/1732301524158/Put/seqid=0 2024-11-22T18:52:34,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35359 is added to blk_1073741850_1035 (size=5156) 2024-11-22T18:52:34,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45249 is added to blk_1073741850_1035 (size=5156) 2024-11-22T18:52:34,196 INFO [M:0;d79ba0c344fb:32959 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/991dd7834695411ea4f3266860112cee 2024-11-22T18:52:34,215 DEBUG [M:0;d79ba0c344fb:32959 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/68fe8185d9c2430f89a530c16da6bd56 is 52, key is load_balancer_on/state:d/1732301524841/Put/seqid=0 2024-11-22T18:52:34,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35359 is added to blk_1073741851_1036 (size=5056) 2024-11-22T18:52:34,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45249 is added to blk_1073741851_1036 (size=5056) 2024-11-22T18:52:34,225 INFO [M:0;d79ba0c344fb:32959 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/68fe8185d9c2430f89a530c16da6bd56 2024-11-22T18:52:34,230 DEBUG [M:0;d79ba0c344fb:32959 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a0b83da3877d434596359bdd67e1cc00 as hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a0b83da3877d434596359bdd67e1cc00 2024-11-22T18:52:34,234 INFO [M:0;d79ba0c344fb:32959 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a0b83da3877d434596359bdd67e1cc00, entries=8, sequenceid=56, filesize=5.5 K 2024-11-22T18:52:34,235 DEBUG [M:0;d79ba0c344fb:32959 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/094c7ba94a1c4519bd1443847d882c54 as hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/094c7ba94a1c4519bd1443847d882c54 2024-11-22T18:52:34,240 INFO [M:0;d79ba0c344fb:32959 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/094c7ba94a1c4519bd1443847d882c54, entries=6, sequenceid=56, filesize=6.0 K 2024-11-22T18:52:34,241 DEBUG [M:0;d79ba0c344fb:32959 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/991dd7834695411ea4f3266860112cee as hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/991dd7834695411ea4f3266860112cee 2024-11-22T18:52:34,245 INFO [M:0;d79ba0c344fb:32959 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/991dd7834695411ea4f3266860112cee, entries=1, sequenceid=56, filesize=5.0 K 2024-11-22T18:52:34,246 DEBUG [M:0;d79ba0c344fb:32959 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/68fe8185d9c2430f89a530c16da6bd56 as hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/68fe8185d9c2430f89a530c16da6bd56 2024-11-22T18:52:34,251 INFO [M:0;d79ba0c344fb:32959 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/68fe8185d9c2430f89a530c16da6bd56, entries=1, sequenceid=56, filesize=4.9 K 2024-11-22T18:52:34,252 INFO [M:0;d79ba0c344fb:32959 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 159ms, sequenceid=56, compaction requested=false 2024-11-22T18:52:34,253 INFO [M:0;d79ba0c344fb:32959 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-22T18:52:34,253 DEBUG [M:0;d79ba0c344fb:32959 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732301554093Disabling compacts and flushes for region at 1732301554093Disabling writes for close at 1732301554093Obtaining lock to block concurrent updates at 1732301554093Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732301554093Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23738, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1732301554093Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732301554113 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732301554113Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732301554131 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732301554131Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732301554143 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732301554157 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732301554157Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732301554168 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732301554189 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732301554190 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732301554200 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732301554214 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732301554214Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@60758ef3: reopening flushed file at 1732301554229 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@49a8ded3: reopening flushed file at 1732301554234 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6c81d761: reopening flushed file at 1732301554240 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1e460d97: reopening flushed file at 1732301554245 (+5 ms)Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 159ms, sequenceid=56, compaction requested=false at 1732301554252 (+7 ms)Writing region close event to WAL at 1732301554253 (+1 ms)Closed at 1732301554253 2024-11-22T18:52:34,254 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:52:34,254 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:52:34,254 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:52:34,254 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:52:34,254 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:52:34,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45249 is added to blk_1073741847_1031 (size=757) 2024-11-22T18:52:34,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35359 is added to blk_1073741847_1031 (size=757) 2024-11-22T18:52:34,371 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:34,373 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:34,761 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:34,761 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:34,778 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:34,779 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:34,779 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:34,779 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:34,780 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:34,780 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:34,785 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:34,785 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:34,785 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:34,788 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:34,792 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:34,792 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:35,295 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-22T18:52:35,296 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:35,296 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:35,297 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:35,297 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:35,320 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:35,321 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:35,321 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:35,322 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:35,322 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:35,322 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:35,328 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:35,328 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:35,329 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:35,332 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:35,371 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:35,373 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:36,372 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:36,374 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:36,612 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1015: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-22T18:52:37,373 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:37,374 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:38,113 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/MasterData/WALs/d79ba0c344fb,32959,1732301523867/d79ba0c344fb%2C32959%2C1732301523867.1732301524004 after 4002ms 2024-11-22T18:52:38,114 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/MasterData/WALs/d79ba0c344fb,32959,1732301523867/d79ba0c344fb%2C32959%2C1732301523867.1732301524004 to hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/MasterData/oldWALs/d79ba0c344fb%2C32959%2C1732301523867.1732301524004 2024-11-22T18:52:38,117 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/MasterData/oldWALs/d79ba0c344fb%2C32959%2C1732301523867.1732301524004 to hdfs://localhost:46651/user/jenkins/test-data/22d09cd6-bfb1-ec9e-9c5e-da962f0d2e99/oldWALs/d79ba0c344fb%2C32959%2C1732301523867.1732301524004$masterlocalwal$ 2024-11-22T18:52:38,117 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-22T18:52:38,117 INFO [M:0;d79ba0c344fb:32959 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
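[Editor's note] The repeated "Failed invocation ... InvocationTargetException ... Caused by: java.io.IOException: Filesystem closed" entries above come from RecoverLeaseFSUtils probing whether the WAL file is already closed by calling DistributedFileSystem.isFileClosed through reflection; once the test's DFSClient has been shut down, that probe fails until the other cluster's lease recovery succeeds ("Recovered lease, attempt=1 ... after 4002ms"). A minimal, hedged sketch of that reflective probe pattern is shown below; the class and method names isFileClosed/DistributedFileSystem are taken from the stack traces, while the surrounding helper is illustrative and not the actual HBase source.

// Hedged sketch, assuming only what the stack traces above show: the probe
// looks up isFileClosed(Path) reflectively (it is not on the generic
// FileSystem API) and treats a failure such as IOException("Filesystem
// closed") from DFSClient.checkOpen as "could not determine".
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class IsFileClosedProbe {

  /**
   * Returns true only if HDFS positively reports the file as closed.
   * Returns false when the filesystem is not HDFS or the probe itself fails,
   * which is what the WARN "Failed invocation for ..." lines above record.
   */
  static boolean isFileClosed(FileSystem fs, Path path) {
    try {
      // isFileClosed(Path) exists on DistributedFileSystem only, hence the
      // reflective lookup against the concrete filesystem class.
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, path);
    } catch (NoSuchMethodException e) {
      // Not an HDFS filesystem: nothing to probe.
      return false;
    } catch (IllegalAccessException | InvocationTargetException e) {
      // An InvocationTargetException wrapping IOException("Filesystem closed")
      // matches the cause chain logged above; the caller retries or gives up.
      return false;
    }
  }

  private IsFileClosedProbe() {
  }
}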
2024-11-22T18:52:38,118 INFO [M:0;d79ba0c344fb:32959 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:32959 2024-11-22T18:52:38,118 INFO [M:0;d79ba0c344fb:32959 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T18:52:38,220 INFO [M:0;d79ba0c344fb:32959 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T18:52:38,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32959-0x1014105761c0000, quorum=127.0.0.1:51025, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T18:52:38,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32959-0x1014105761c0000, quorum=127.0.0.1:51025, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T18:52:38,222 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@652ca842{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T18:52:38,223 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5d802677{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T18:52:38,223 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T18:52:38,223 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@64685bd7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T18:52:38,223 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@49f94f8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/hadoop.log.dir/,STOPPED} 2024-11-22T18:52:38,224 WARN [BP-1703702927-172.17.0.2-1732301523030 heartbeating to localhost/127.0.0.1:46651 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T18:52:38,224 WARN [BP-1703702927-172.17.0.2-1732301523030 heartbeating to localhost/127.0.0.1:46651 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1703702927-172.17.0.2-1732301523030 (Datanode Uuid c0a494c7-167c-424b-9e54-5b31acf57666) service to localhost/127.0.0.1:46651 2024-11-22T18:52:38,224 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T18:52:38,224 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T18:52:38,225 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/cluster_6f545e2e-bed3-1cd6-45bc-f784bad987a4/data/data3/current/BP-1703702927-172.17.0.2-1732301523030 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T18:52:38,225 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/cluster_6f545e2e-bed3-1cd6-45bc-f784bad987a4/data/data4/current/BP-1703702927-172.17.0.2-1732301523030 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T18:52:38,225 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T18:52:38,227 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@67d70e61{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T18:52:38,228 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5f1a012{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T18:52:38,228 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T18:52:38,228 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@57d5f4b3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T18:52:38,228 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3f8a0d0d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/hadoop.log.dir/,STOPPED} 2024-11-22T18:52:38,229 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T18:52:38,229 WARN [BP-1703702927-172.17.0.2-1732301523030 heartbeating to localhost/127.0.0.1:46651 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T18:52:38,229 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T18:52:38,229 WARN [BP-1703702927-172.17.0.2-1732301523030 heartbeating to localhost/127.0.0.1:46651 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1703702927-172.17.0.2-1732301523030 (Datanode Uuid 6573a035-60e5-4acc-9a47-16eae730f01f) service to localhost/127.0.0.1:46651 2024-11-22T18:52:38,229 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/cluster_6f545e2e-bed3-1cd6-45bc-f784bad987a4/data/data1/current/BP-1703702927-172.17.0.2-1732301523030 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T18:52:38,230 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/cluster_6f545e2e-bed3-1cd6-45bc-f784bad987a4/data/data2/current/BP-1703702927-172.17.0.2-1732301523030 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T18:52:38,230 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T18:52:38,235 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4702e786{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T18:52:38,236 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@38a1581{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T18:52:38,236 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T18:52:38,236 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2ddfefdc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T18:52:38,236 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@13c2425d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/hadoop.log.dir/,STOPPED} 2024-11-22T18:52:38,242 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-22T18:52:38,259 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-22T18:52:38,269 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=180 (was 155) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:46651 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46651 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46651 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:46651 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:46651 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46651 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:46651 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:46651 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=455 (was 454) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=117 (was 141), ProcessCount=11 (was 11), AvailableMemoryMB=7964 (was 7629) - AvailableMemoryMB LEAK? 
- 2024-11-22T18:52:38,277 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=180, OpenFileDescriptor=455, MaxFileDescriptor=1048576, SystemLoadAverage=117, ProcessCount=11, AvailableMemoryMB=7964 2024-11-22T18:52:38,277 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-22T18:52:38,277 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/hadoop.log.dir so I do NOT create it in target/test-data/5b0f5c40-b18a-bc60-0f18-bf97c927e44f 2024-11-22T18:52:38,277 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70cded03-1906-38ee-9529-3992d66f34e6/hadoop.tmp.dir so I do NOT create it in target/test-data/5b0f5c40-b18a-bc60-0f18-bf97c927e44f 2024-11-22T18:52:38,277 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5b0f5c40-b18a-bc60-0f18-bf97c927e44f/cluster_b6c4e7e8-fe57-3337-aa1e-9f24d9be3a48, deleteOnExit=true 2024-11-22T18:52:38,277 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-22T18:52:38,277 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5b0f5c40-b18a-bc60-0f18-bf97c927e44f/test.cache.data in system properties and HBase conf 2024-11-22T18:52:38,277 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5b0f5c40-b18a-bc60-0f18-bf97c927e44f/hadoop.tmp.dir in system properties and HBase conf 2024-11-22T18:52:38,278 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5b0f5c40-b18a-bc60-0f18-bf97c927e44f/hadoop.log.dir in system properties and HBase conf 2024-11-22T18:52:38,278 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5b0f5c40-b18a-bc60-0f18-bf97c927e44f/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-22T18:52:38,278 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5b0f5c40-b18a-bc60-0f18-bf97c927e44f/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-22T18:52:38,278 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-22T18:52:38,278 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-22T18:52:38,278 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5b0f5c40-b18a-bc60-0f18-bf97c927e44f/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-22T18:52:38,278 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5b0f5c40-b18a-bc60-0f18-bf97c927e44f/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-22T18:52:38,278 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5b0f5c40-b18a-bc60-0f18-bf97c927e44f/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-22T18:52:38,278 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5b0f5c40-b18a-bc60-0f18-bf97c927e44f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T18:52:38,278 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5b0f5c40-b18a-bc60-0f18-bf97c927e44f/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-22T18:52:38,278 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5b0f5c40-b18a-bc60-0f18-bf97c927e44f/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-22T18:52:38,278 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5b0f5c40-b18a-bc60-0f18-bf97c927e44f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T18:52:38,278 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5b0f5c40-b18a-bc60-0f18-bf97c927e44f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T18:52:38,278 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5b0f5c40-b18a-bc60-0f18-bf97c927e44f/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-22T18:52:38,278 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5b0f5c40-b18a-bc60-0f18-bf97c927e44f/nfs.dump.dir in system properties and HBase conf 2024-11-22T18:52:38,279 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5b0f5c40-b18a-bc60-0f18-bf97c927e44f/java.io.tmpdir in system properties and HBase conf 2024-11-22T18:52:38,279 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5b0f5c40-b18a-bc60-0f18-bf97c927e44f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T18:52:38,279 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5b0f5c40-b18a-bc60-0f18-bf97c927e44f/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-22T18:52:38,279 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5b0f5c40-b18a-bc60-0f18-bf97c927e44f/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-22T18:52:38,291 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T18:52:38,373 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:38,375 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:38,385 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T18:52:38,391 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T18:52:38,393 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T18:52:38,393 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T18:52:38,393 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T18:52:38,394 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T18:52:38,406 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5bbcc3bf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5b0f5c40-b18a-bc60-0f18-bf97c927e44f/hadoop.log.dir/,AVAILABLE} 2024-11-22T18:52:38,407 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7638bdc6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T18:52:38,550 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@48b18cf{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5b0f5c40-b18a-bc60-0f18-bf97c927e44f/java.io.tmpdir/jetty-localhost-41699-hadoop-hdfs-3_4_1-tests_jar-_-any-2717296245753533410/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T18:52:38,550 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@30eae670{HTTP/1.1, (http/1.1)}{localhost:41699} 2024-11-22T18:52:38,551 INFO [Time-limited test {}] server.Server(415): Started @183940ms 2024-11-22T18:52:38,569 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T18:52:38,646 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T18:52:38,650 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T18:52:38,651 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T18:52:38,651 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T18:52:38,651 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T18:52:38,652 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fa328f4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5b0f5c40-b18a-bc60-0f18-bf97c927e44f/hadoop.log.dir/,AVAILABLE} 2024-11-22T18:52:38,652 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a5f2a0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T18:52:38,780 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4ff27683{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5b0f5c40-b18a-bc60-0f18-bf97c927e44f/java.io.tmpdir/jetty-localhost-42939-hadoop-hdfs-3_4_1-tests_jar-_-any-9040132988664249705/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T18:52:38,780 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@38f5461{HTTP/1.1, (http/1.1)}{localhost:42939} 2024-11-22T18:52:38,781 INFO [Time-limited test {}] server.Server(415): Started @184170ms 2024-11-22T18:52:38,782 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T18:52:38,810 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T18:52:38,812 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T18:52:38,813 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T18:52:38,813 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T18:52:38,813 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T18:52:38,813 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@16aeea80{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5b0f5c40-b18a-bc60-0f18-bf97c927e44f/hadoop.log.dir/,AVAILABLE} 2024-11-22T18:52:38,814 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@67c2b9b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T18:52:38,885 WARN [Thread-1644 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5b0f5c40-b18a-bc60-0f18-bf97c927e44f/cluster_b6c4e7e8-fe57-3337-aa1e-9f24d9be3a48/data/data1/current/BP-1599718698-172.17.0.2-1732301558309/current, will proceed with Du for space computation calculation, 2024-11-22T18:52:38,886 WARN [Thread-1645 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5b0f5c40-b18a-bc60-0f18-bf97c927e44f/cluster_b6c4e7e8-fe57-3337-aa1e-9f24d9be3a48/data/data2/current/BP-1599718698-172.17.0.2-1732301558309/current, will proceed with Du for space computation calculation, 2024-11-22T18:52:38,907 WARN [Thread-1623 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T18:52:38,909 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb16a9141d5631985 with lease ID 0x989dc634a84eb2a6: Processing first storage report for DS-fdca8098-0288-47f8-8195-b175f4f49569 from datanode DatanodeRegistration(127.0.0.1:42649, datanodeUuid=94a7c4de-dd99-4213-8ebf-a7f55b69c462, infoPort=45141, infoSecurePort=0, ipcPort=40557, storageInfo=lv=-57;cid=testClusterID;nsid=1095991636;c=1732301558309) 2024-11-22T18:52:38,909 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb16a9141d5631985 with lease ID 0x989dc634a84eb2a6: from storage DS-fdca8098-0288-47f8-8195-b175f4f49569 node DatanodeRegistration(127.0.0.1:42649, datanodeUuid=94a7c4de-dd99-4213-8ebf-a7f55b69c462, infoPort=45141, infoSecurePort=0, ipcPort=40557, storageInfo=lv=-57;cid=testClusterID;nsid=1095991636;c=1732301558309), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T18:52:38,910 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb16a9141d5631985 with lease ID 0x989dc634a84eb2a6: Processing first storage report for DS-8df3c9de-17c0-420d-ba54-5539655b6cdd from datanode DatanodeRegistration(127.0.0.1:42649, datanodeUuid=94a7c4de-dd99-4213-8ebf-a7f55b69c462, infoPort=45141, infoSecurePort=0, ipcPort=40557, storageInfo=lv=-57;cid=testClusterID;nsid=1095991636;c=1732301558309) 2024-11-22T18:52:38,910 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb16a9141d5631985 with lease ID 0x989dc634a84eb2a6: from storage DS-8df3c9de-17c0-420d-ba54-5539655b6cdd node DatanodeRegistration(127.0.0.1:42649, datanodeUuid=94a7c4de-dd99-4213-8ebf-a7f55b69c462, infoPort=45141, infoSecurePort=0, ipcPort=40557, storageInfo=lv=-57;cid=testClusterID;nsid=1095991636;c=1732301558309), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-22T18:52:38,935 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@76b2d62a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5b0f5c40-b18a-bc60-0f18-bf97c927e44f/java.io.tmpdir/jetty-localhost-46483-hadoop-hdfs-3_4_1-tests_jar-_-any-17025312278897554149/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T18:52:38,935 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@40c8737d{HTTP/1.1, (http/1.1)}{localhost:46483} 2024-11-22T18:52:38,935 INFO [Time-limited test {}] server.Server(415): Started @184324ms 2024-11-22T18:52:38,936 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-22T18:52:39,032 WARN [Thread-1671 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5b0f5c40-b18a-bc60-0f18-bf97c927e44f/cluster_b6c4e7e8-fe57-3337-aa1e-9f24d9be3a48/data/data4/current/BP-1599718698-172.17.0.2-1732301558309/current, will proceed with Du for space computation calculation, 2024-11-22T18:52:39,032 WARN [Thread-1670 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5b0f5c40-b18a-bc60-0f18-bf97c927e44f/cluster_b6c4e7e8-fe57-3337-aa1e-9f24d9be3a48/data/data3/current/BP-1599718698-172.17.0.2-1732301558309/current, will proceed with Du for space computation calculation, 2024-11-22T18:52:39,050 WARN [Thread-1659 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-22T18:52:39,052 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf49e55424a36a41f with lease ID 0x989dc634a84eb2a7: Processing first storage report for DS-75e8f694-66a4-4e93-94ac-ed001997067a from datanode DatanodeRegistration(127.0.0.1:34905, datanodeUuid=8aa88aa0-6399-4715-9ca7-4e3c1a2cdc84, infoPort=32883, infoSecurePort=0, ipcPort=41737, storageInfo=lv=-57;cid=testClusterID;nsid=1095991636;c=1732301558309) 2024-11-22T18:52:39,052 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf49e55424a36a41f with lease ID 0x989dc634a84eb2a7: from storage DS-75e8f694-66a4-4e93-94ac-ed001997067a node DatanodeRegistration(127.0.0.1:34905, datanodeUuid=8aa88aa0-6399-4715-9ca7-4e3c1a2cdc84, infoPort=32883, infoSecurePort=0, ipcPort=41737, storageInfo=lv=-57;cid=testClusterID;nsid=1095991636;c=1732301558309), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T18:52:39,052 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf49e55424a36a41f with lease ID 0x989dc634a84eb2a7: Processing first storage report for DS-54be71b5-d291-49ee-93f8-b220f5a657e3 from datanode DatanodeRegistration(127.0.0.1:34905, datanodeUuid=8aa88aa0-6399-4715-9ca7-4e3c1a2cdc84, infoPort=32883, infoSecurePort=0, ipcPort=41737, storageInfo=lv=-57;cid=testClusterID;nsid=1095991636;c=1732301558309) 2024-11-22T18:52:39,052 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf49e55424a36a41f with lease ID 0x989dc634a84eb2a7: from storage DS-54be71b5-d291-49ee-93f8-b220f5a657e3 node DatanodeRegistration(127.0.0.1:34905, datanodeUuid=8aa88aa0-6399-4715-9ca7-4e3c1a2cdc84, infoPort=32883, infoSecurePort=0, ipcPort=41737, storageInfo=lv=-57;cid=testClusterID;nsid=1095991636;c=1732301558309), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T18:52:39,057 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5b0f5c40-b18a-bc60-0f18-bf97c927e44f 2024-11-22T18:52:39,060 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5b0f5c40-b18a-bc60-0f18-bf97c927e44f/cluster_b6c4e7e8-fe57-3337-aa1e-9f24d9be3a48/zookeeper_0, clientPort=56543, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5b0f5c40-b18a-bc60-0f18-bf97c927e44f/cluster_b6c4e7e8-fe57-3337-aa1e-9f24d9be3a48/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5b0f5c40-b18a-bc60-0f18-bf97c927e44f/cluster_b6c4e7e8-fe57-3337-aa1e-9f24d9be3a48/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-22T18:52:39,060 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56543 2024-11-22T18:52:39,061 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T18:52:39,062 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T18:52:39,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42649 is added to blk_1073741825_1001 (size=7) 2024-11-22T18:52:39,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34905 is added to blk_1073741825_1001 (size=7) 2024-11-22T18:52:39,071 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d with version=8 2024-11-22T18:52:39,071 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/hbase-staging 2024-11-22T18:52:39,073 INFO [Time-limited test {}] client.ConnectionUtils(128): master/d79ba0c344fb:0 server-side Connection retries=45 2024-11-22T18:52:39,073 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T18:52:39,073 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T18:52:39,073 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T18:52:39,073 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T18:52:39,073 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T18:52:39,073 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-22T18:52:39,073 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T18:52:39,074 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42225 2024-11-22T18:52:39,075 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42225 connecting to ZooKeeper ensemble=127.0.0.1:56543 2024-11-22T18:52:39,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:422250x0, quorum=127.0.0.1:56543, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T18:52:39,080 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42225-0x1014105ffa30000 connected 2024-11-22T18:52:39,096 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T18:52:39,097 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T18:52:39,099 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42225-0x1014105ffa30000, quorum=127.0.0.1:56543, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T18:52:39,099 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d, hbase.cluster.distributed=false 2024-11-22T18:52:39,100 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42225-0x1014105ffa30000, quorum=127.0.0.1:56543, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T18:52:39,101 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42225 2024-11-22T18:52:39,101 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42225 2024-11-22T18:52:39,101 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42225 2024-11-22T18:52:39,101 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42225 2024-11-22T18:52:39,101 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42225 2024-11-22T18:52:39,117 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/d79ba0c344fb:0 server-side Connection retries=45 2024-11-22T18:52:39,117 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T18:52:39,117 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T18:52:39,117 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T18:52:39,117 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T18:52:39,117 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T18:52:39,117 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-22T18:52:39,117 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T18:52:39,118 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46653 2024-11-22T18:52:39,119 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46653 connecting to ZooKeeper ensemble=127.0.0.1:56543 2024-11-22T18:52:39,119 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T18:52:39,121 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T18:52:39,124 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:466530x0, quorum=127.0.0.1:56543, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T18:52:39,124 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:466530x0, quorum=127.0.0.1:56543, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T18:52:39,124 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46653-0x1014105ffa30001 connected 2024-11-22T18:52:39,125 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-22T18:52:39,125 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-22T18:52:39,126 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46653-0x1014105ffa30001, quorum=127.0.0.1:56543, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-22T18:52:39,126 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46653-0x1014105ffa30001, quorum=127.0.0.1:56543, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T18:52:39,127 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46653 2024-11-22T18:52:39,127 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46653 2024-11-22T18:52:39,127 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46653 2024-11-22T18:52:39,128 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46653 2024-11-22T18:52:39,128 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46653 2024-11-22T18:52:39,139 
DEBUG [M:0;d79ba0c344fb:42225 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;d79ba0c344fb:42225 2024-11-22T18:52:39,139 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/d79ba0c344fb,42225,1732301559072 2024-11-22T18:52:39,141 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46653-0x1014105ffa30001, quorum=127.0.0.1:56543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T18:52:39,141 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42225-0x1014105ffa30000, quorum=127.0.0.1:56543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T18:52:39,142 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42225-0x1014105ffa30000, quorum=127.0.0.1:56543, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/d79ba0c344fb,42225,1732301559072 2024-11-22T18:52:39,143 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46653-0x1014105ffa30001, quorum=127.0.0.1:56543, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-22T18:52:39,143 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42225-0x1014105ffa30000, quorum=127.0.0.1:56543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:52:39,143 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46653-0x1014105ffa30001, quorum=127.0.0.1:56543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:52:39,144 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42225-0x1014105ffa30000, quorum=127.0.0.1:56543, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-22T18:52:39,144 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/d79ba0c344fb,42225,1732301559072 from backup master directory 2024-11-22T18:52:39,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42225-0x1014105ffa30000, quorum=127.0.0.1:56543, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/d79ba0c344fb,42225,1732301559072 2024-11-22T18:52:39,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46653-0x1014105ffa30001, quorum=127.0.0.1:56543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T18:52:39,146 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42225-0x1014105ffa30000, quorum=127.0.0.1:56543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T18:52:39,146 WARN [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-22T18:52:39,146 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=d79ba0c344fb,42225,1732301559072 2024-11-22T18:52:39,149 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/hbase.id] with ID: 090970f2-39ef-44da-912a-addd827a808d 2024-11-22T18:52:39,149 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/.tmp/hbase.id 2024-11-22T18:52:39,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34905 is added to blk_1073741826_1002 (size=42) 2024-11-22T18:52:39,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42649 is added to blk_1073741826_1002 (size=42) 2024-11-22T18:52:39,155 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/.tmp/hbase.id]:[hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/hbase.id] 2024-11-22T18:52:39,165 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T18:52:39,165 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-22T18:52:39,166 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-22T18:52:39,168 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42225-0x1014105ffa30000, quorum=127.0.0.1:56543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:52:39,168 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46653-0x1014105ffa30001, quorum=127.0.0.1:56543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:52:39,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42649 is added to blk_1073741827_1003 (size=196) 2024-11-22T18:52:39,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34905 is added to blk_1073741827_1003 (size=196) 2024-11-22T18:52:39,182 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T18:52:39,183 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-22T18:52:39,183 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T18:52:39,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42649 is added to blk_1073741828_1004 (size=1189) 2024-11-22T18:52:39,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34905 is added to blk_1073741828_1004 (size=1189) 2024-11-22T18:52:39,194 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/MasterData/data/master/store 2024-11-22T18:52:39,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42649 is added to blk_1073741829_1005 (size=34) 2024-11-22T18:52:39,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34905 is added to blk_1073741829_1005 (size=34) 2024-11-22T18:52:39,202 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T18:52:39,202 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T18:52:39,202 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T18:52:39,202 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T18:52:39,202 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T18:52:39,202 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T18:52:39,202 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-22T18:52:39,202 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732301559202Disabling compacts and flushes for region at 1732301559202Disabling writes for close at 1732301559202Writing region close event to WAL at 1732301559202Closed at 1732301559202 2024-11-22T18:52:39,203 WARN [master/d79ba0c344fb:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/MasterData/data/master/store/.initializing 2024-11-22T18:52:39,203 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/MasterData/WALs/d79ba0c344fb,42225,1732301559072 2024-11-22T18:52:39,205 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d79ba0c344fb%2C42225%2C1732301559072, suffix=, logDir=hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/MasterData/WALs/d79ba0c344fb,42225,1732301559072, archiveDir=hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/MasterData/oldWALs, maxLogs=10 2024-11-22T18:52:39,206 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor d79ba0c344fb%2C42225%2C1732301559072.1732301559205 2024-11-22T18:52:39,215 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/MasterData/WALs/d79ba0c344fb,42225,1732301559072/d79ba0c344fb%2C42225%2C1732301559072.1732301559205 2024-11-22T18:52:39,216 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32883:32883),(127.0.0.1/127.0.0.1:45141:45141)] 2024-11-22T18:52:39,222 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-22T18:52:39,222 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T18:52:39,222 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:52:39,222 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:52:39,224 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:52:39,225 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-22T18:52:39,225 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:52:39,226 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:52:39,226 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:52:39,227 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-22T18:52:39,227 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:52:39,228 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T18:52:39,228 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:52:39,230 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-22T18:52:39,230 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:52:39,230 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T18:52:39,230 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:52:39,232 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-22T18:52:39,232 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:52:39,232 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T18:52:39,232 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:52:39,233 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:52:39,233 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:52:39,235 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:52:39,235 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:52:39,236 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-22T18:52:39,237 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:52:39,239 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T18:52:39,239 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=702134, jitterRate=-0.10719096660614014}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-22T18:52:39,240 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732301559223Initializing all the Stores at 1732301559223Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732301559223Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732301559224 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732301559224Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732301559224Cleaning up temporary data from old regions at 1732301559235 (+11 ms)Region opened successfully at 1732301559240 (+5 ms) 2024-11-22T18:52:39,240 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-22T18:52:39,244 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ed0558e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d79ba0c344fb/172.17.0.2:0 2024-11-22T18:52:39,245 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-22T18:52:39,245 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-22T18:52:39,245 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-22T18:52:39,245 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-22T18:52:39,246 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-22T18:52:39,246 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-22T18:52:39,246 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-22T18:52:39,248 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-22T18:52:39,249 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42225-0x1014105ffa30000, quorum=127.0.0.1:56543, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-22T18:52:39,252 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-22T18:52:39,252 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-22T18:52:39,253 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42225-0x1014105ffa30000, quorum=127.0.0.1:56543, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-22T18:52:39,254 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-22T18:52:39,255 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-22T18:52:39,256 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42225-0x1014105ffa30000, quorum=127.0.0.1:56543, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-22T18:52:39,257 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-22T18:52:39,257 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42225-0x1014105ffa30000, quorum=127.0.0.1:56543, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-22T18:52:39,259 DEBUG 
[master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-22T18:52:39,261 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42225-0x1014105ffa30000, quorum=127.0.0.1:56543, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-22T18:52:39,263 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-22T18:52:39,265 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46653-0x1014105ffa30001, quorum=127.0.0.1:56543, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T18:52:39,265 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42225-0x1014105ffa30000, quorum=127.0.0.1:56543, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T18:52:39,265 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46653-0x1014105ffa30001, quorum=127.0.0.1:56543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:52:39,265 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42225-0x1014105ffa30000, quorum=127.0.0.1:56543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:52:39,265 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=d79ba0c344fb,42225,1732301559072, sessionid=0x1014105ffa30000, setting cluster-up flag (Was=false) 2024-11-22T18:52:39,268 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42225-0x1014105ffa30000, quorum=127.0.0.1:56543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:52:39,268 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46653-0x1014105ffa30001, quorum=127.0.0.1:56543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:52:39,274 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-22T18:52:39,275 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=d79ba0c344fb,42225,1732301559072 2024-11-22T18:52:39,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46653-0x1014105ffa30001, quorum=127.0.0.1:56543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:52:39,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42225-0x1014105ffa30000, quorum=127.0.0.1:56543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:52:39,285 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-22T18:52:39,287 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=d79ba0c344fb,42225,1732301559072 2024-11-22T18:52:39,288 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-22T18:52:39,293 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-22T18:52:39,293 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-22T18:52:39,293 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-22T18:52:39,294 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: d79ba0c344fb,42225,1732301559072 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-22T18:52:39,295 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/d79ba0c344fb:0, corePoolSize=5, maxPoolSize=5 2024-11-22T18:52:39,295 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/d79ba0c344fb:0, corePoolSize=5, maxPoolSize=5 2024-11-22T18:52:39,295 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/d79ba0c344fb:0, corePoolSize=5, maxPoolSize=5 2024-11-22T18:52:39,295 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/d79ba0c344fb:0, corePoolSize=5, maxPoolSize=5 2024-11-22T18:52:39,295 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/d79ba0c344fb:0, corePoolSize=10, maxPoolSize=10 2024-11-22T18:52:39,296 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:52:39,296 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/d79ba0c344fb:0, corePoolSize=2, maxPoolSize=2 2024-11-22T18:52:39,296 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/d79ba0c344fb:0, corePoolSize=1, 
maxPoolSize=1 2024-11-22T18:52:39,299 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732301589299 2024-11-22T18:52:39,299 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-22T18:52:39,300 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-22T18:52:39,300 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-22T18:52:39,300 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-22T18:52:39,300 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-22T18:52:39,300 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-22T18:52:39,300 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T18:52:39,300 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-22T18:52:39,300 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-22T18:52:39,300 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-22T18:52:39,300 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T18:52:39,301 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-22T18:52:39,302 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:52:39,302 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-22T18:52:39,304 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-22T18:52:39,304 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-22T18:52:39,305 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/d79ba0c344fb:0:becomeActiveMaster-HFileCleaner.large.0-1732301559304,5,FailOnTimeoutGroup] 2024-11-22T18:52:39,306 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/d79ba0c344fb:0:becomeActiveMaster-HFileCleaner.small.0-1732301559305,5,FailOnTimeoutGroup] 2024-11-22T18:52:39,306 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T18:52:39,306 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-22T18:52:39,306 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-22T18:52:39,306 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-22T18:52:39,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34905 is added to blk_1073741831_1007 (size=1321) 2024-11-22T18:52:39,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42649 is added to blk_1073741831_1007 (size=1321) 2024-11-22T18:52:39,317 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-22T18:52:39,318 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d 2024-11-22T18:52:39,330 INFO [RS:0;d79ba0c344fb:46653 {}] regionserver.HRegionServer(746): ClusterId : 090970f2-39ef-44da-912a-addd827a808d 2024-11-22T18:52:39,330 DEBUG [RS:0;d79ba0c344fb:46653 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-22T18:52:39,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42649 is added to blk_1073741832_1008 (size=32) 2024-11-22T18:52:39,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34905 is added to blk_1073741832_1008 (size=32) 2024-11-22T18:52:39,331 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T18:52:39,332 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column 
family info of region 1588230740 2024-11-22T18:52:39,333 DEBUG [RS:0;d79ba0c344fb:46653 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-22T18:52:39,333 DEBUG [RS:0;d79ba0c344fb:46653 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-22T18:52:39,334 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T18:52:39,334 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:52:39,334 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:52:39,335 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T18:52:39,335 DEBUG [RS:0;d79ba0c344fb:46653 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-22T18:52:39,336 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T18:52:39,336 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:52:39,336 DEBUG [RS:0;d79ba0c344fb:46653 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7163ccd6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d79ba0c344fb/172.17.0.2:0 2024-11-22T18:52:39,336 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:52:39,336 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T18:52:39,339 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T18:52:39,339 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:52:39,339 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:52:39,340 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T18:52:39,341 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T18:52:39,341 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:52:39,342 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:52:39,342 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T18:52:39,343 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/hbase/meta/1588230740 2024-11-22T18:52:39,343 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/hbase/meta/1588230740 2024-11-22T18:52:39,345 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T18:52:39,345 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T18:52:39,345 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T18:52:39,346 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T18:52:39,352 DEBUG [RS:0;d79ba0c344fb:46653 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;d79ba0c344fb:46653 2024-11-22T18:52:39,352 INFO [RS:0;d79ba0c344fb:46653 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-22T18:52:39,352 INFO [RS:0;d79ba0c344fb:46653 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-22T18:52:39,352 DEBUG [RS:0;d79ba0c344fb:46653 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-22T18:52:39,352 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T18:52:39,353 INFO [RS:0;d79ba0c344fb:46653 {}] regionserver.HRegionServer(2659): reportForDuty to master=d79ba0c344fb,42225,1732301559072 with port=46653, startcode=1732301559116 2024-11-22T18:52:39,353 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=745559, jitterRate=-0.05197392404079437}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T18:52:39,353 DEBUG [RS:0;d79ba0c344fb:46653 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-22T18:52:39,353 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732301559331Initializing all the Stores at 1732301559332 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732301559332Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732301559332Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 
'true', BLOCKSIZE => '65536 B (64KB)'} at 1732301559332Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732301559332Cleaning up temporary data from old regions at 1732301559345 (+13 ms)Region opened successfully at 1732301559353 (+8 ms) 2024-11-22T18:52:39,353 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T18:52:39,353 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T18:52:39,353 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T18:52:39,354 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T18:52:39,354 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T18:52:39,356 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T18:52:39,356 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732301559353Disabling compacts and flushes for region at 1732301559353Disabling writes for close at 1732301559354 (+1 ms)Writing region close event to WAL at 1732301559356 (+2 ms)Closed at 1732301559356 2024-11-22T18:52:39,358 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T18:52:39,358 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-22T18:52:39,358 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-22T18:52:39,358 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50703, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-22T18:52:39,359 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42225 {}] master.ServerManager(363): Checking decommissioned status of RegionServer d79ba0c344fb,46653,1732301559116 2024-11-22T18:52:39,359 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42225 {}] master.ServerManager(517): Registering regionserver=d79ba0c344fb,46653,1732301559116 2024-11-22T18:52:39,360 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T18:52:39,361 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-22T18:52:39,361 DEBUG [RS:0;d79ba0c344fb:46653 {}] regionserver.HRegionServer(1440): Config from master: 
hbase.rootdir=hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d 2024-11-22T18:52:39,361 DEBUG [RS:0;d79ba0c344fb:46653 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42317 2024-11-22T18:52:39,361 DEBUG [RS:0;d79ba0c344fb:46653 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-22T18:52:39,363 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42225-0x1014105ffa30000, quorum=127.0.0.1:56543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T18:52:39,364 DEBUG [RS:0;d79ba0c344fb:46653 {}] zookeeper.ZKUtil(111): regionserver:46653-0x1014105ffa30001, quorum=127.0.0.1:56543, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/d79ba0c344fb,46653,1732301559116 2024-11-22T18:52:39,364 WARN [RS:0;d79ba0c344fb:46653 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-22T18:52:39,364 INFO [RS:0;d79ba0c344fb:46653 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T18:52:39,364 DEBUG [RS:0;d79ba0c344fb:46653 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/WALs/d79ba0c344fb,46653,1732301559116 2024-11-22T18:52:39,364 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [d79ba0c344fb,46653,1732301559116] 2024-11-22T18:52:39,369 INFO [RS:0;d79ba0c344fb:46653 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-22T18:52:39,371 INFO [RS:0;d79ba0c344fb:46653 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-22T18:52:39,372 INFO [RS:0;d79ba0c344fb:46653 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-22T18:52:39,372 INFO [RS:0;d79ba0c344fb:46653 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T18:52:39,373 INFO [RS:0;d79ba0c344fb:46653 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-22T18:52:39,374 INFO [RS:0;d79ba0c344fb:46653 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-22T18:52:39,374 INFO [RS:0;d79ba0c344fb:46653 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-22T18:52:39,374 DEBUG [RS:0;d79ba0c344fb:46653 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1
2024-11-22T18:52:39,374 DEBUG [RS:0;d79ba0c344fb:46653 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1
2024-11-22T18:52:39,374 DEBUG [RS:0;d79ba0c344fb:46653 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1
2024-11-22T18:52:39,374 DEBUG [RS:0;d79ba0c344fb:46653 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1
2024-11-22T18:52:39,374 DEBUG [RS:0;d79ba0c344fb:46653 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1
2024-11-22T18:52:39,374 DEBUG [RS:0;d79ba0c344fb:46653 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/d79ba0c344fb:0, corePoolSize=2, maxPoolSize=2
2024-11-22T18:52:39,374 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-22T18:52:39,374 DEBUG [RS:0;d79ba0c344fb:46653 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1
2024-11-22T18:52:39,374 DEBUG [RS:0;d79ba0c344fb:46653 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1
2024-11-22T18:52:39,374 DEBUG [RS:0;d79ba0c344fb:46653 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1
2024-11-22T18:52:39,374 DEBUG [RS:0;d79ba0c344fb:46653 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1
2024-11-22T18:52:39,374 DEBUG [RS:0;d79ba0c344fb:46653 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1
2024-11-22T18:52:39,374 DEBUG [RS:0;d79ba0c344fb:46653 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1
2024-11-22T18:52:39,374 DEBUG [RS:0;d79ba0c344fb:46653 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/d79ba0c344fb:0, corePoolSize=3, maxPoolSize=3
2024-11-22T18:52:39,375 DEBUG [RS:0;d79ba0c344fb:46653 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/d79ba0c344fb:0, corePoolSize=3, maxPoolSize=3
2024-11-22T18:52:39,375 INFO [RS:0;d79ba0c344fb:46653 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled.
2024-11-22T18:52:39,375 INFO [RS:0;d79ba0c344fb:46653 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-11-22T18:52:39,375 INFO [RS:0;d79ba0c344fb:46653 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-22T18:52:39,375 INFO [RS:0;d79ba0c344fb:46653 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
2024-11-22T18:52:39,375 INFO [RS:0;d79ba0c344fb:46653 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled.
2024-11-22T18:52:39,375 INFO [RS:0;d79ba0c344fb:46653 {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,46653,1732301559116-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-11-22T18:52:39,375 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-22T18:52:39,398 INFO [RS:0;d79ba0c344fb:46653 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-11-22T18:52:39,399 INFO [RS:0;d79ba0c344fb:46653 {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,46653,1732301559116-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-22T18:52:39,399 INFO [RS:0;d79ba0c344fb:46653 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-11-22T18:52:39,399 INFO [RS:0;d79ba0c344fb:46653 {}] regionserver.Replication(171): d79ba0c344fb,46653,1732301559116 started
2024-11-22T18:52:39,415 INFO [RS:0;d79ba0c344fb:46653 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-11-22T18:52:39,415 INFO [RS:0;d79ba0c344fb:46653 {}] regionserver.HRegionServer(1482): Serving as d79ba0c344fb,46653,1732301559116, RpcServer on d79ba0c344fb/172.17.0.2:46653, sessionid=0x1014105ffa30001 2024-11-22T18:52:39,415 DEBUG [RS:0;d79ba0c344fb:46653 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-22T18:52:39,415 DEBUG [RS:0;d79ba0c344fb:46653 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager d79ba0c344fb,46653,1732301559116 2024-11-22T18:52:39,415 DEBUG [RS:0;d79ba0c344fb:46653 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd79ba0c344fb,46653,1732301559116' 2024-11-22T18:52:39,415 DEBUG [RS:0;d79ba0c344fb:46653 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-22T18:52:39,416 DEBUG [RS:0;d79ba0c344fb:46653 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-22T18:52:39,416 DEBUG [RS:0;d79ba0c344fb:46653 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-22T18:52:39,417 DEBUG [RS:0;d79ba0c344fb:46653 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-22T18:52:39,417 DEBUG [RS:0;d79ba0c344fb:46653 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager d79ba0c344fb,46653,1732301559116 2024-11-22T18:52:39,417 DEBUG [RS:0;d79ba0c344fb:46653 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd79ba0c344fb,46653,1732301559116' 2024-11-22T18:52:39,417 DEBUG [RS:0;d79ba0c344fb:46653 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-22T18:52:39,417 DEBUG [RS:0;d79ba0c344fb:46653 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-22T18:52:39,417 DEBUG [RS:0;d79ba0c344fb:46653 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-22T18:52:39,417 INFO [RS:0;d79ba0c344fb:46653 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-22T18:52:39,417 INFO [RS:0;d79ba0c344fb:46653 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-22T18:52:39,511 WARN [d79ba0c344fb:42225 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-11-22T18:52:39,519 INFO [RS:0;d79ba0c344fb:46653 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d79ba0c344fb%2C46653%2C1732301559116, suffix=, logDir=hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/WALs/d79ba0c344fb,46653,1732301559116, archiveDir=hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/oldWALs, maxLogs=32 2024-11-22T18:52:39,520 INFO [RS:0;d79ba0c344fb:46653 {}] monitor.StreamSlowMonitor(122): New stream slow monitor d79ba0c344fb%2C46653%2C1732301559116.1732301559520 2024-11-22T18:52:39,528 INFO [RS:0;d79ba0c344fb:46653 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/WALs/d79ba0c344fb,46653,1732301559116/d79ba0c344fb%2C46653%2C1732301559116.1732301559520 2024-11-22T18:52:39,533 DEBUG [RS:0;d79ba0c344fb:46653 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32883:32883),(127.0.0.1/127.0.0.1:45141:45141)] 2024-11-22T18:52:39,762 DEBUG [d79ba0c344fb:42225 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-22T18:52:39,762 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=d79ba0c344fb,46653,1732301559116 2024-11-22T18:52:39,764 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as d79ba0c344fb,46653,1732301559116, state=OPENING 2024-11-22T18:52:39,767 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-22T18:52:39,768 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42225-0x1014105ffa30000, quorum=127.0.0.1:56543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:52:39,768 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46653-0x1014105ffa30001, quorum=127.0.0.1:56543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:52:39,769 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T18:52:39,769 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T18:52:39,769 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=d79ba0c344fb,46653,1732301559116}] 2024-11-22T18:52:39,769 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T18:52:39,922 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-22T18:52:39,924 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43345, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-22T18:52:39,927 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-22T18:52:39,927 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T18:52:39,929 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d79ba0c344fb%2C46653%2C1732301559116.meta, suffix=.meta, logDir=hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/WALs/d79ba0c344fb,46653,1732301559116, archiveDir=hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/oldWALs, maxLogs=32 2024-11-22T18:52:39,929 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor d79ba0c344fb%2C46653%2C1732301559116.meta.1732301559929.meta 2024-11-22T18:52:39,937 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/WALs/d79ba0c344fb,46653,1732301559116/d79ba0c344fb%2C46653%2C1732301559116.meta.1732301559929.meta 2024-11-22T18:52:39,939 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45141:45141),(127.0.0.1/127.0.0.1:32883:32883)] 2024-11-22T18:52:39,940 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-22T18:52:39,940 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-22T18:52:39,940 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-22T18:52:39,940 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-22T18:52:39,940 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-22T18:52:39,940 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T18:52:39,940 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-22T18:52:39,940 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-22T18:52:39,941 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T18:52:39,942 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T18:52:39,942 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:52:39,943 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:52:39,943 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T18:52:39,943 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T18:52:39,943 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:52:39,944 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:52:39,944 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T18:52:39,944 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T18:52:39,944 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:52:39,945 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:52:39,945 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T18:52:39,945 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T18:52:39,945 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:52:39,946 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-22T18:52:39,946 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T18:52:39,946 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/hbase/meta/1588230740 2024-11-22T18:52:39,947 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/hbase/meta/1588230740 2024-11-22T18:52:39,948 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T18:52:39,948 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T18:52:39,949 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T18:52:39,950 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T18:52:39,950 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=884674, jitterRate=0.12492209672927856}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T18:52:39,950 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-22T18:52:39,951 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732301559940Writing region info on filesystem at 1732301559940Initializing all the Stores at 1732301559941 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732301559941Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732301559941Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732301559941Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732301559941Cleaning up temporary data from old regions at 1732301559948 (+7 ms)Running coprocessor post-open hooks at 1732301559950 (+2 ms)Region opened successfully at 1732301559951 (+1 ms) 2024-11-22T18:52:39,952 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732301559922 2024-11-22T18:52:39,954 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-22T18:52:39,954 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-22T18:52:39,955 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=d79ba0c344fb,46653,1732301559116 2024-11-22T18:52:39,956 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as d79ba0c344fb,46653,1732301559116, state=OPEN 2024-11-22T18:52:39,960 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42225-0x1014105ffa30000, quorum=127.0.0.1:56543, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T18:52:39,960 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46653-0x1014105ffa30001, quorum=127.0.0.1:56543, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T18:52:39,960 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=d79ba0c344fb,46653,1732301559116 2024-11-22T18:52:39,961 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T18:52:39,961 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T18:52:39,963 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-22T18:52:39,963 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=d79ba0c344fb,46653,1732301559116 in 192 msec 2024-11-22T18:52:39,965 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-22T18:52:39,966 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 605 msec 2024-11-22T18:52:39,966 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T18:52:39,966 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-22T18:52:39,967 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T18:52:39,968 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=d79ba0c344fb,46653,1732301559116, seqNum=-1] 2024-11-22T18:52:39,968 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T18:52:39,969 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52005, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T18:52:39,974 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 682 msec 2024-11-22T18:52:39,974 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732301559974, completionTime=-1 2024-11-22T18:52:39,974 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-22T18:52:39,974 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-22T18:52:39,976 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-22T18:52:39,976 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732301619976 2024-11-22T18:52:39,976 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732301679976 2024-11-22T18:52:39,976 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-22T18:52:39,976 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,42225,1732301559072-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T18:52:39,976 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,42225,1732301559072-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T18:52:39,976 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,42225,1732301559072-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T18:52:39,976 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-d79ba0c344fb:42225, period=300000, unit=MILLISECONDS is enabled. 
2024-11-22T18:52:39,976 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-22T18:52:39,977 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-22T18:52:39,978 DEBUG [master/d79ba0c344fb:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-22T18:52:39,979 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.833sec 2024-11-22T18:52:39,979 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-22T18:52:39,979 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-22T18:52:39,979 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-22T18:52:39,980 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-22T18:52:39,980 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-22T18:52:39,980 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,42225,1732301559072-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T18:52:39,980 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,42225,1732301559072-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-22T18:52:39,982 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-22T18:52:39,982 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-22T18:52:39,982 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,42225,1732301559072-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-22T18:52:40,030 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@750c8565, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T18:52:40,030 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request d79ba0c344fb,42225,-1 for getting cluster id 2024-11-22T18:52:40,030 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T18:52:40,032 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '090970f2-39ef-44da-912a-addd827a808d' 2024-11-22T18:52:40,032 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T18:52:40,033 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "090970f2-39ef-44da-912a-addd827a808d" 2024-11-22T18:52:40,033 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23273074, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T18:52:40,033 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [d79ba0c344fb,42225,-1] 2024-11-22T18:52:40,033 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T18:52:40,033 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T18:52:40,034 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41656, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T18:52:40,035 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4bd1d692, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T18:52:40,036 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T18:52:40,036 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=d79ba0c344fb,46653,1732301559116, seqNum=-1] 2024-11-22T18:52:40,037 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T18:52:40,038 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39052, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T18:52:40,039 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=d79ba0c344fb,42225,1732301559072 2024-11-22T18:52:40,039 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using 
class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T18:52:40,042 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-22T18:52:40,042 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-22T18:52:40,043 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is d79ba0c344fb,42225,1732301559072 2024-11-22T18:52:40,043 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@58e81caf 2024-11-22T18:52:40,043 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-22T18:52:40,044 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41672, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-22T18:52:40,045 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42225 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-22T18:52:40,045 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42225 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-22T18:52:40,045 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42225 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T18:52:40,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42225 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-22T18:52:40,047 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-22T18:52:40,047 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:52:40,048 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42225 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-22T18:52:40,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42225 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-22T18:52:40,049 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-22T18:52:40,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34905 is added to blk_1073741835_1011 (size=405) 2024-11-22T18:52:40,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42649 is added to blk_1073741835_1011 (size=405) 2024-11-22T18:52:40,058 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 79fec7cb106296c409d286c6b19cf329, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732301560044.79fec7cb106296c409d286c6b19cf329.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d 2024-11-22T18:52:40,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42649 is added to blk_1073741836_1012 (size=88) 2024-11-22T18:52:40,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34905 is added to blk_1073741836_1012 (size=88) 2024-11-22T18:52:40,064 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732301560044.79fec7cb106296c409d286c6b19cf329.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T18:52:40,064 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 79fec7cb106296c409d286c6b19cf329, disabling compactions & flushes 2024-11-22T18:52:40,064 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732301560044.79fec7cb106296c409d286c6b19cf329. 2024-11-22T18:52:40,064 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732301560044.79fec7cb106296c409d286c6b19cf329. 2024-11-22T18:52:40,064 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732301560044.79fec7cb106296c409d286c6b19cf329. after waiting 0 ms 2024-11-22T18:52:40,064 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732301560044.79fec7cb106296c409d286c6b19cf329. 
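[Editor's note] For reference, a minimal sketch (not taken from the test itself) of how a table with the same descriptor could be created through the public HBase Admin API. The table name, the single 'info' family with one version, and the deliberately small MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192) values mirror what the CreateTableProcedure and TableDescriptorChecker entries above report; the connection setup is assumed to pick up whatever hbase-site.xml is on the classpath.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName name =
          TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
      // Single 'info' family, max 1 version, matching the descriptor logged by HMaster.
      TableDescriptorBuilder td = TableDescriptorBuilder.newBuilder(name)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(1)
              .build())
          // Deliberately small limits (the values flagged in the WARNs above) to force
          // frequent flushes and splits during a log-rolling test.
          .setMaxFileSize(786432L)
          .setMemStoreFlushSize(8192L);
      admin.createTable(td.build());
    }
  }
}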
2024-11-22T18:52:40,064 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732301560044.79fec7cb106296c409d286c6b19cf329. 2024-11-22T18:52:40,064 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 79fec7cb106296c409d286c6b19cf329: Waiting for close lock at 1732301560064Disabling compacts and flushes for region at 1732301560064Disabling writes for close at 1732301560064Writing region close event to WAL at 1732301560064Closed at 1732301560064 2024-11-22T18:52:40,066 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-22T18:52:40,066 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732301560044.79fec7cb106296c409d286c6b19cf329.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1732301560066"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732301560066"}]},"ts":"1732301560066"} 2024-11-22T18:52:40,068 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-22T18:52:40,069 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-22T18:52:40,069 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732301560069"}]},"ts":"1732301560069"} 2024-11-22T18:52:40,071 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-22T18:52:40,071 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=79fec7cb106296c409d286c6b19cf329, ASSIGN}] 2024-11-22T18:52:40,073 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=79fec7cb106296c409d286c6b19cf329, ASSIGN 2024-11-22T18:52:40,074 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=79fec7cb106296c409d286c6b19cf329, ASSIGN; state=OFFLINE, location=d79ba0c344fb,46653,1732301559116; forceNewPlan=false, retain=false 2024-11-22T18:52:40,224 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=79fec7cb106296c409d286c6b19cf329, regionState=OPENING, regionLocation=d79ba0c344fb,46653,1732301559116 2024-11-22T18:52:40,227 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] 
procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=79fec7cb106296c409d286c6b19cf329, ASSIGN because future has completed 2024-11-22T18:52:40,227 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 79fec7cb106296c409d286c6b19cf329, server=d79ba0c344fb,46653,1732301559116}] 2024-11-22T18:52:40,375 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:40,376 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:40,384 INFO [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732301560044.79fec7cb106296c409d286c6b19cf329. 
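[Editor's note] The repeated "Failed invocation ... Filesystem closed" warnings above come from RecoverLeaseFSUtils polling whether an old WAL file has been closed while the DFSClient behind that FileSystem instance has already been shut down, so every poll throws and is retried about once per second. Below is a simplified, illustrative sketch of that poll-and-retry pattern against HDFS (it is not the actual RecoverLeaseFSUtils implementation, which additionally uses reflection for compatibility with older Hadoop versions); the NameNode address is the one from the warnings, and the WAL path is assumed to be passed in by the caller.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  // Ask the NameNode to recover the lease on a WAL file, then poll until HDFS reports
  // the file closed. If the underlying DFSClient has been closed, isFileClosed throws
  // "Filesystem closed", which is exactly what each WARN above records per retry.
  static void recoverLease(DistributedFileSystem dfs, Path wal) throws Exception {
    boolean recovered = dfs.recoverLease(wal);
    while (!recovered) {
      Thread.sleep(1000L);                 // the log shows roughly one attempt per second
      try {
        if (dfs.isFileClosed(wal)) {       // the call that fails with "Filesystem closed"
          recovered = true;
          break;
        }
      } catch (Exception e) {
        // swallow and retry; the test log surfaces each failure as a WARN
      }
      recovered = dfs.recoverLease(wal);
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DistributedFileSystem dfs = (DistributedFileSystem)
        FileSystem.get(URI.create("hdfs://localhost:44035"), conf);
    recoverLease(dfs, new Path(args[0]));  // e.g. the WAL path quoted in the warnings
  }
}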
2024-11-22T18:52:40,384 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 79fec7cb106296c409d286c6b19cf329, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732301560044.79fec7cb106296c409d286c6b19cf329.', STARTKEY => '', ENDKEY => ''} 2024-11-22T18:52:40,384 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 79fec7cb106296c409d286c6b19cf329 2024-11-22T18:52:40,384 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732301560044.79fec7cb106296c409d286c6b19cf329.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T18:52:40,384 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 79fec7cb106296c409d286c6b19cf329 2024-11-22T18:52:40,384 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 79fec7cb106296c409d286c6b19cf329 2024-11-22T18:52:40,386 INFO [StoreOpener-79fec7cb106296c409d286c6b19cf329-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 79fec7cb106296c409d286c6b19cf329 2024-11-22T18:52:40,387 INFO [StoreOpener-79fec7cb106296c409d286c6b19cf329-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 79fec7cb106296c409d286c6b19cf329 columnFamilyName info 2024-11-22T18:52:40,387 DEBUG [StoreOpener-79fec7cb106296c409d286c6b19cf329-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:52:40,387 INFO [StoreOpener-79fec7cb106296c409d286c6b19cf329-1 {}] regionserver.HStore(327): Store=79fec7cb106296c409d286c6b19cf329/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T18:52:40,387 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 79fec7cb106296c409d286c6b19cf329 2024-11-22T18:52:40,388 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/79fec7cb106296c409d286c6b19cf329 2024-11-22T18:52:40,388 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/79fec7cb106296c409d286c6b19cf329 2024-11-22T18:52:40,389 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 79fec7cb106296c409d286c6b19cf329 2024-11-22T18:52:40,389 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 79fec7cb106296c409d286c6b19cf329 2024-11-22T18:52:40,390 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 79fec7cb106296c409d286c6b19cf329 2024-11-22T18:52:40,392 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/79fec7cb106296c409d286c6b19cf329/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T18:52:40,392 INFO [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 79fec7cb106296c409d286c6b19cf329; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=783610, jitterRate=-0.003588661551475525}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T18:52:40,392 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 79fec7cb106296c409d286c6b19cf329 2024-11-22T18:52:40,393 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 79fec7cb106296c409d286c6b19cf329: Running coprocessor pre-open hook at 1732301560385Writing region info on filesystem at 1732301560385Initializing all the Stores at 1732301560385Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732301560385Cleaning up temporary data from old regions at 1732301560389 (+4 ms)Running coprocessor post-open hooks at 1732301560392 (+3 ms)Region opened successfully at 1732301560393 (+1 ms) 2024-11-22T18:52:40,394 INFO [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732301560044.79fec7cb106296c409d286c6b19cf329., pid=6, masterSystemTime=1732301560380 2024-11-22T18:52:40,397 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task 
for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732301560044.79fec7cb106296c409d286c6b19cf329. 2024-11-22T18:52:40,397 INFO [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732301560044.79fec7cb106296c409d286c6b19cf329. 2024-11-22T18:52:40,398 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=79fec7cb106296c409d286c6b19cf329, regionState=OPEN, openSeqNum=2, regionLocation=d79ba0c344fb,46653,1732301559116 2024-11-22T18:52:40,400 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 79fec7cb106296c409d286c6b19cf329, server=d79ba0c344fb,46653,1732301559116 because future has completed 2024-11-22T18:52:40,405 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-22T18:52:40,405 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 79fec7cb106296c409d286c6b19cf329, server=d79ba0c344fb,46653,1732301559116 in 175 msec 2024-11-22T18:52:40,408 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-22T18:52:40,408 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=79fec7cb106296c409d286c6b19cf329, ASSIGN in 334 msec 2024-11-22T18:52:40,409 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-22T18:52:40,409 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732301560409"}]},"ts":"1732301560409"} 2024-11-22T18:52:40,410 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-22T18:52:40,411 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-22T18:52:40,413 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 366 msec 2024-11-22T18:52:41,375 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:41,376 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:42,376 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:42,377 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:43,376 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:43,377 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:44,377 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:44,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:45,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:45,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:45,442 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-22T18:52:45,443 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:45,443 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:45,443 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:45,444 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:45,444 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:45,444 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:45,463 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:45,463 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:45,464 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:45,464 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:45,464 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:45,465 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:45,470 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:45,470 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:45,471 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:45,473 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:52:45,479 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-22T18:52:45,479 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-22T18:52:46,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:46,379 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:52:47,379 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
2024-11-22T18:52:47,379 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta
2024-11-22T18:52:48,379 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390
2024-11-22T18:52:48,380 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta
2024-11-22T18:52:49,380 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta
2024-11-22T18:52:49,380 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390
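The WARN entries above record the WAL close path retrying lease recovery roughly once per second against a DFSClient that the test has already shut down, so every isFileClosed() probe fails with "Filesystem closed". As a rough, hedged sketch only, not the actual RecoverLeaseFSUtils implementation (which goes through reflection and has its own timeout handling); the class and method names below are invented for illustration, while the DistributedFileSystem calls are the public HDFS client API:

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoveryPollSketch {
  // Ask the NameNode to start lease recovery on a WAL file, then poll
  // isFileClosed() about once per second until the file is closed or the
  // deadline passes. If the underlying DFSClient has already been closed,
  // both calls throw IOException("Filesystem closed"), which is the
  // failure the WARN entries above keep logging.
  static boolean recoverLeaseAndWait(DistributedFileSystem dfs, Path wal, long timeoutMs)
      throws IOException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    boolean recovered = dfs.recoverLease(wal);
    while (!recovered && System.currentTimeMillis() < deadline) {
      Thread.sleep(1000L);
      if (dfs.isFileClosed(wal)) {
        return true;
      }
      recovered = dfs.recoverLease(wal);
    }
    return recovered;
  }
}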
2024-11-22T18:52:49,873 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-11-22T18:52:49,873 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-11-22T18:52:49,874 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-22T18:52:49,874 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers
2024-11-22T18:52:49,874 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-22T18:52:49,874 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer
2024-11-22T18:52:50,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42225 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-22T18:52:50,098 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-22T18:52:50,098 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100
2024-11-22T18:52:50,101 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-22T18:52:50,101 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732301560044.79fec7cb106296c409d286c6b19cf329.
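The CREATE operation reported complete at 18:52:50,098 produced the single-region table that the rest of this run exercises; the 'info' column family only becomes visible later in the flush output. A minimal sketch of creating such a table through the public Admin API, assuming a default HBaseConfiguration and a single 'info' family (both assumptions, not read from the log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public final class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
          .build();
      // The master runs table creation as a procedure; the client polls it
      // until done, which is the "procedure is done pid=4" check logged above.
      admin.createTable(desc);
    }
  }
}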
2024-11-22T18:52:50,105 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732301560044.79fec7cb106296c409d286c6b19cf329., hostname=d79ba0c344fb,46653,1732301559116, seqNum=2]
2024-11-22T18:52:50,112 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42225 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-22T18:52:50,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42225 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-22T18:52:50,118 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-22T18:52:50,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42225 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-22T18:52:50,119 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-22T18:52:50,121 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-22T18:52:50,281 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46653 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8
2024-11-22T18:52:50,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d79ba0c344fb:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732301560044.79fec7cb106296c409d286c6b19cf329.
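The flush logged at 18:52:50,112 is a client-driven table flush: the master stores a FlushTableProcedure (pid=7) and fans out a FlushRegionProcedure (pid=8) to the hosting region server. A hedged sketch of issuing the same request from client code, given an existing Admin handle such as the one opened in the previous sketch:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public final class FlushTableSketch {
  // Admin.flush(TableName) is documented as a synchronous operation: it
  // returns once the master's flush procedure and its per-region children
  // have finished, matching the pid=7 / pid=8 lifecycle in the entries above.
  static void flushTestTable(Admin admin) throws java.io.IOException {
    admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
  }
}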
2024-11-22T18:52:50,282 INFO [RS_FLUSH_OPERATIONS-regionserver/d79ba0c344fb:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 79fec7cb106296c409d286c6b19cf329 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-22T18:52:50,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d79ba0c344fb:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/79fec7cb106296c409d286c6b19cf329/.tmp/info/dba8b0aa3fed48269922f562971b7e39 is 1080, key is row0001/info:/1732301570106/Put/seqid=0
2024-11-22T18:52:50,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34905 is added to blk_1073741837_1013 (size=6033)
2024-11-22T18:52:50,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42649 is added to blk_1073741837_1013 (size=6033)
2024-11-22T18:52:50,324 INFO [RS_FLUSH_OPERATIONS-regionserver/d79ba0c344fb:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/79fec7cb106296c409d286c6b19cf329/.tmp/info/dba8b0aa3fed48269922f562971b7e39
2024-11-22T18:52:50,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d79ba0c344fb:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/79fec7cb106296c409d286c6b19cf329/.tmp/info/dba8b0aa3fed48269922f562971b7e39 as hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/79fec7cb106296c409d286c6b19cf329/info/dba8b0aa3fed48269922f562971b7e39
2024-11-22T18:52:50,342 INFO [RS_FLUSH_OPERATIONS-regionserver/d79ba0c344fb:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/79fec7cb106296c409d286c6b19cf329/info/dba8b0aa3fed48269922f562971b7e39, entries=1, sequenceid=5, filesize=5.9 K
2024-11-22T18:52:50,344 INFO [RS_FLUSH_OPERATIONS-regionserver/d79ba0c344fb:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 79fec7cb106296c409d286c6b19cf329 in 61ms, sequenceid=5, compaction requested=false
2024-11-22T18:52:50,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d79ba0c344fb:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 79fec7cb106296c409d286c6b19cf329:
2024-11-22T18:52:50,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d79ba0c344fb:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732301560044.79fec7cb106296c409d286c6b19cf329.
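The flushed store file holds a single cell keyed row0001/info:/1732301570106/Put with an empty qualifier, accounting for the 1.05 KB memstore, so the test evidently wrote one roughly 1 KB value to the 'info' family before requesting the flush. A hedged sketch of that kind of write; the 1 KB value size is a guess for illustration, not a figure taken from the log:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class PutRowSketch {
  // Write ~1 KB under row0001, family 'info', empty qualifier; once flushed,
  // this becomes the single-entry HFile committed into the info/ store above.
  static void writeRow(Connection conn) throws java.io.IOException {
    try (Table table = conn.getTable(
        TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"))) {
      Put put = new Put(Bytes.toBytes("row0001"));
      put.addColumn(Bytes.toBytes("info"), Bytes.toBytes(""), new byte[1024]);
      table.put(put);
    }
  }
}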
2024-11-22T18:52:50,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d79ba0c344fb:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8
2024-11-22T18:52:50,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42225 {}] master.HMaster(4169): Remote procedure done, pid=8
2024-11-22T18:52:50,354 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7
2024-11-22T18:52:50,354 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 229 msec
2024-11-22T18:52:50,357 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 242 msec
2024-11-22T18:52:50,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta
2024-11-22T18:52:50,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390
2024-11-22T18:52:51,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390
2024-11-22T18:52:51,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta
2024-11-22T18:52:52,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390
2024-11-22T18:52:52,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta
2024-11-22T18:52:53,383 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta
2024-11-22T18:52:53,383 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390
2024-11-22T18:52:54,383 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta
2024-11-22T18:52:54,383 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390
2024-11-22T18:52:55,384 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390
2024-11-22T18:52:55,384 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta
2024-11-22T18:52:56,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta
2024-11-22T18:52:56,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390
2024-11-22T18:52:57,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta
2024-11-22T18:52:57,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390
2024-11-22T18:52:58,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta
2024-11-22T18:52:58,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390
2024-11-22T18:52:59,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390
2024-11-22T18:52:59,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta
11 more 2024-11-22T18:53:00,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42225 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-22T18:53:00,167 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-22T18:53:00,170 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42225 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-22T18:53:00,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42225 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-22T18:53:00,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42225 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-22T18:53:00,173 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-22T18:53:00,173 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T18:53:00,173 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T18:53:00,326 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46653 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10 2024-11-22T18:53:00,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d79ba0c344fb:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732301560044.79fec7cb106296c409d286c6b19cf329. 
2024-11-22T18:53:00,327 INFO [RS_FLUSH_OPERATIONS-regionserver/d79ba0c344fb:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 79fec7cb106296c409d286c6b19cf329 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-22T18:53:00,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d79ba0c344fb:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/79fec7cb106296c409d286c6b19cf329/.tmp/info/f95bcbfc86744cadb0ce0a798b9c2ac3 is 1080, key is row0002/info:/1732301580169/Put/seqid=0 2024-11-22T18:53:00,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34905 is added to blk_1073741838_1014 (size=6033) 2024-11-22T18:53:00,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42649 is added to blk_1073741838_1014 (size=6033) 2024-11-22T18:53:00,338 INFO [RS_FLUSH_OPERATIONS-regionserver/d79ba0c344fb:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/79fec7cb106296c409d286c6b19cf329/.tmp/info/f95bcbfc86744cadb0ce0a798b9c2ac3 2024-11-22T18:53:00,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d79ba0c344fb:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/79fec7cb106296c409d286c6b19cf329/.tmp/info/f95bcbfc86744cadb0ce0a798b9c2ac3 as hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/79fec7cb106296c409d286c6b19cf329/info/f95bcbfc86744cadb0ce0a798b9c2ac3 2024-11-22T18:53:00,350 INFO [RS_FLUSH_OPERATIONS-regionserver/d79ba0c344fb:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/79fec7cb106296c409d286c6b19cf329/info/f95bcbfc86744cadb0ce0a798b9c2ac3, entries=1, sequenceid=9, filesize=5.9 K 2024-11-22T18:53:00,351 INFO [RS_FLUSH_OPERATIONS-regionserver/d79ba0c344fb:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 79fec7cb106296c409d286c6b19cf329 in 24ms, sequenceid=9, compaction requested=false 2024-11-22T18:53:00,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d79ba0c344fb:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 79fec7cb106296c409d286c6b19cf329: 2024-11-22T18:53:00,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d79ba0c344fb:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732301560044.79fec7cb106296c409d286c6b19cf329. 
2024-11-22T18:53:00,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d79ba0c344fb:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-11-22T18:53:00,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42225 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-11-22T18:53:00,356 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-22T18:53:00,356 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 181 msec 2024-11-22T18:53:00,359 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 187 msec 2024-11-22T18:53:00,387 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T18:53:00,387 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:01,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:01,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T18:53:02,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:02,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:03,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T18:53:03,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:04,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:04,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T18:53:05,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:05,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:05,391 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta after 68037ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor202.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T18:53:05,391 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 after 68049ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor202.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T18:53:06,391 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:06,391 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T18:53:07,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:07,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:08,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T18:53:08,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:09,057 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-22T18:53:09,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
2024-11-22T18:53:09,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-22T18:53:10,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42225 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-22T18:53:10,177 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-22T18:53:10,180 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor d79ba0c344fb%2C46653%2C1732301559116.1732301590180
2024-11-22T18:53:10,186 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T18:53:10,186 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T18:53:10,186 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T18:53:10,186 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T18:53:10,186 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T18:53:10,187 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/WALs/d79ba0c344fb,46653,1732301559116/d79ba0c344fb%2C46653%2C1732301559116.1732301559520 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/WALs/d79ba0c344fb,46653,1732301559116/d79ba0c344fb%2C46653%2C1732301559116.1732301590180
2024-11-22T18:53:10,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42649 is added to blk_1073741833_1009 (size=5546)
2024-11-22T18:53:10,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34905 is added to blk_1073741833_1009 (size=5546)
2024-11-22T18:53:10,192 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32883:32883),(127.0.0.1/127.0.0.1:45141:45141)]
2024-11-22T18:53:10,193 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42225 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-22T18:53:10,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42225 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-22T18:53:10,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42225 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-22T18:53:10,196 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-22T18:53:10,197 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-22T18:53:10,197 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-22T18:53:10,350 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46653 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12
2024-11-22T18:53:10,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d79ba0c344fb:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732301560044.79fec7cb106296c409d286c6b19cf329.
2024-11-22T18:53:10,351 INFO [RS_FLUSH_OPERATIONS-regionserver/d79ba0c344fb:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 79fec7cb106296c409d286c6b19cf329 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-22T18:53:10,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d79ba0c344fb:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/79fec7cb106296c409d286c6b19cf329/.tmp/info/f0bbc4eb13444304a5471aadd6eb308a is 1080, key is row0003/info:/1732301590178/Put/seqid=0
2024-11-22T18:53:10,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42649 is added to blk_1073741840_1016 (size=6033)
2024-11-22T18:53:10,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34905 is added to blk_1073741840_1016 (size=6033)
2024-11-22T18:53:10,361 INFO [RS_FLUSH_OPERATIONS-regionserver/d79ba0c344fb:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/79fec7cb106296c409d286c6b19cf329/.tmp/info/f0bbc4eb13444304a5471aadd6eb308a
2024-11-22T18:53:10,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d79ba0c344fb:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/79fec7cb106296c409d286c6b19cf329/.tmp/info/f0bbc4eb13444304a5471aadd6eb308a as hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/79fec7cb106296c409d286c6b19cf329/info/f0bbc4eb13444304a5471aadd6eb308a
2024-11-22T18:53:10,372 INFO [RS_FLUSH_OPERATIONS-regionserver/d79ba0c344fb:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/79fec7cb106296c409d286c6b19cf329/info/f0bbc4eb13444304a5471aadd6eb308a, entries=1, sequenceid=13, filesize=5.9 K
2024-11-22T18:53:10,373 INFO [RS_FLUSH_OPERATIONS-regionserver/d79ba0c344fb:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 79fec7cb106296c409d286c6b19cf329 in 22ms, sequenceid=13, compaction requested=true
2024-11-22T18:53:10,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d79ba0c344fb:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 79fec7cb106296c409d286c6b19cf329:
2024-11-22T18:53:10,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d79ba0c344fb:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732301560044.79fec7cb106296c409d286c6b19cf329.
2024-11-22T18:53:10,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d79ba0c344fb:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12
2024-11-22T18:53:10,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42225 {}] master.HMaster(4169): Remote procedure done, pid=12
2024-11-22T18:53:10,378 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11
2024-11-22T18:53:10,378 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 178 msec
2024-11-22T18:53:10,381 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 186 msec
2024-11-22T18:53:10,394 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-22T18:53:10,394 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-22T18:53:11,394 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-22T18:53:11,394 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-22T18:53:12,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-22T18:53:12,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-22T18:53:13,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-22T18:53:13,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-22T18:53:14,396 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-22T18:53:14,396 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-22T18:53:15,397 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-22T18:53:15,397 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-22T18:53:16,397 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-22T18:53:16,397 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-22T18:53:17,398 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-22T18:53:17,398 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-22T18:53:18,398 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-22T18:53:18,398 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-22T18:53:19,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-22T18:53:19,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-22T18:53:19,999 INFO [master/d79ba0c344fb:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-11-22T18:53:20,000 INFO [master/d79ba0c344fb:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-11-22T18:53:20,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42225 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-22T18:53:20,248 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-22T18:53:20,248 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T18:53:20,249 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T18:53:20,250 DEBUG [Time-limited test {}] regionserver.HStore(1541): 79fec7cb106296c409d286c6b19cf329/info is initiating minor compaction (all files) 2024-11-22T18:53:20,250 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-22T18:53:20,250 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T18:53:20,250 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 79fec7cb106296c409d286c6b19cf329/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732301560044.79fec7cb106296c409d286c6b19cf329. 2024-11-22T18:53:20,250 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/79fec7cb106296c409d286c6b19cf329/info/dba8b0aa3fed48269922f562971b7e39, hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/79fec7cb106296c409d286c6b19cf329/info/f95bcbfc86744cadb0ce0a798b9c2ac3, hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/79fec7cb106296c409d286c6b19cf329/info/f0bbc4eb13444304a5471aadd6eb308a] into tmpdir=hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/79fec7cb106296c409d286c6b19cf329/.tmp, totalSize=17.7 K 2024-11-22T18:53:20,250 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting dba8b0aa3fed48269922f562971b7e39, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1732301570106 2024-11-22T18:53:20,251 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting f95bcbfc86744cadb0ce0a798b9c2ac3, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1732301580169 2024-11-22T18:53:20,251 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting f0bbc4eb13444304a5471aadd6eb308a, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732301590178 2024-11-22T18:53:20,263 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 79fec7cb106296c409d286c6b19cf329#info#compaction#45 average throughput is unlimited, slept 0 time(s) and total slept 
time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T18:53:20,264 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/79fec7cb106296c409d286c6b19cf329/.tmp/info/0d99b7abdca941d6833d50bd995bb387 is 1080, key is row0001/info:/1732301570106/Put/seqid=0 2024-11-22T18:53:20,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42649 is added to blk_1073741841_1017 (size=8296) 2024-11-22T18:53:20,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34905 is added to blk_1073741841_1017 (size=8296) 2024-11-22T18:53:20,275 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/79fec7cb106296c409d286c6b19cf329/.tmp/info/0d99b7abdca941d6833d50bd995bb387 as hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/79fec7cb106296c409d286c6b19cf329/info/0d99b7abdca941d6833d50bd995bb387 2024-11-22T18:53:20,282 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 79fec7cb106296c409d286c6b19cf329/info of 79fec7cb106296c409d286c6b19cf329 into 0d99b7abdca941d6833d50bd995bb387(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T18:53:20,282 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 79fec7cb106296c409d286c6b19cf329: 2024-11-22T18:53:20,285 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor d79ba0c344fb%2C46653%2C1732301559116.1732301600285 2024-11-22T18:53:20,292 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:53:20,292 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:53:20,292 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:53:20,292 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:53:20,292 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:53:20,292 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/WALs/d79ba0c344fb,46653,1732301559116/d79ba0c344fb%2C46653%2C1732301559116.1732301590180 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/WALs/d79ba0c344fb,46653,1732301559116/d79ba0c344fb%2C46653%2C1732301559116.1732301600285 2024-11-22T18:53:20,293 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32883:32883),(127.0.0.1/127.0.0.1:45141:45141)] 2024-11-22T18:53:20,293 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/WALs/d79ba0c344fb,46653,1732301559116/d79ba0c344fb%2C46653%2C1732301559116.1732301590180 is not closed yet, will try archiving it next time 2024-11-22T18:53:20,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42649 is added to blk_1073741839_1015 (size=2520) 2024-11-22T18:53:20,294 INFO 
[WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/WALs/d79ba0c344fb,46653,1732301559116/d79ba0c344fb%2C46653%2C1732301559116.1732301559520 to hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/oldWALs/d79ba0c344fb%2C46653%2C1732301559116.1732301559520 2024-11-22T18:53:20,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34905 is added to blk_1073741839_1015 (size=2520) 2024-11-22T18:53:20,295 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42225 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-22T18:53:20,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42225 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-22T18:53:20,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42225 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-22T18:53:20,297 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-22T18:53:20,298 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T18:53:20,298 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T18:53:20,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:20,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T18:53:20,451 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46653 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14 2024-11-22T18:53:20,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d79ba0c344fb:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732301560044.79fec7cb106296c409d286c6b19cf329. 2024-11-22T18:53:20,452 INFO [RS_FLUSH_OPERATIONS-regionserver/d79ba0c344fb:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 79fec7cb106296c409d286c6b19cf329 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-22T18:53:20,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d79ba0c344fb:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/79fec7cb106296c409d286c6b19cf329/.tmp/info/2a45e5626525467b95eb8c72a935b2c7 is 1080, key is row0000/info:/1732301600283/Put/seqid=0 2024-11-22T18:53:20,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42649 is added to blk_1073741843_1019 (size=6033) 2024-11-22T18:53:20,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34905 is added to blk_1073741843_1019 (size=6033) 2024-11-22T18:53:20,462 INFO [RS_FLUSH_OPERATIONS-regionserver/d79ba0c344fb:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/79fec7cb106296c409d286c6b19cf329/.tmp/info/2a45e5626525467b95eb8c72a935b2c7 2024-11-22T18:53:20,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d79ba0c344fb:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/79fec7cb106296c409d286c6b19cf329/.tmp/info/2a45e5626525467b95eb8c72a935b2c7 as hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/79fec7cb106296c409d286c6b19cf329/info/2a45e5626525467b95eb8c72a935b2c7 2024-11-22T18:53:20,474 INFO [RS_FLUSH_OPERATIONS-regionserver/d79ba0c344fb:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/79fec7cb106296c409d286c6b19cf329/info/2a45e5626525467b95eb8c72a935b2c7, entries=1, sequenceid=18, filesize=5.9 K 2024-11-22T18:53:20,475 INFO [RS_FLUSH_OPERATIONS-regionserver/d79ba0c344fb:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 79fec7cb106296c409d286c6b19cf329 in 24ms, sequenceid=18, compaction requested=false 2024-11-22T18:53:20,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d79ba0c344fb:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): 
Flush status journal for 79fec7cb106296c409d286c6b19cf329: 2024-11-22T18:53:20,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d79ba0c344fb:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732301560044.79fec7cb106296c409d286c6b19cf329. 2024-11-22T18:53:20,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d79ba0c344fb:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-22T18:53:20,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42225 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-22T18:53:20,481 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-22T18:53:20,481 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 180 msec 2024-11-22T18:53:20,483 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 186 msec 2024-11-22T18:53:21,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:21,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:22,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:22,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:23,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:23,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:24,402 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:24,402 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:25,384 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 79fec7cb106296c409d286c6b19cf329, had cached 0 bytes from a total of 14329 2024-11-22T18:53:25,403 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:25,403 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:26,403 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:26,403 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:27,404 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:27,404 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:28,404 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:28,404 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:29,405 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:29,405 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:30,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42225 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-22T18:53:30,377 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-22T18:53:30,380 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor d79ba0c344fb%2C46653%2C1732301559116.1732301610380 2024-11-22T18:53:30,386 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:53:30,386 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:53:30,386 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:53:30,386 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:53:30,386 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:53:30,387 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/WALs/d79ba0c344fb,46653,1732301559116/d79ba0c344fb%2C46653%2C1732301559116.1732301600285 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/WALs/d79ba0c344fb,46653,1732301559116/d79ba0c344fb%2C46653%2C1732301559116.1732301610380 2024-11-22T18:53:30,387 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45141:45141),(127.0.0.1/127.0.0.1:32883:32883)] 2024-11-22T18:53:30,387 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/WALs/d79ba0c344fb,46653,1732301559116/d79ba0c344fb%2C46653%2C1732301559116.1732301600285 is not closed 
yet, will try archiving it next time 2024-11-22T18:53:30,387 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-22T18:53:30,387 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/WALs/d79ba0c344fb,46653,1732301559116/d79ba0c344fb%2C46653%2C1732301559116.1732301590180 to hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/oldWALs/d79ba0c344fb%2C46653%2C1732301559116.1732301590180 2024-11-22T18:53:30,387 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-22T18:53:30,388 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T18:53:30,388 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T18:53:30,388 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T18:53:30,388 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-22T18:53:30,388 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-22T18:53:30,388 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=390080786, stopped=false 2024-11-22T18:53:30,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42649 is added to blk_1073741842_1018 (size=2026) 2024-11-22T18:53:30,388 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=d79ba0c344fb,42225,1732301559072 2024-11-22T18:53:30,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34905 is added to blk_1073741842_1018 (size=2026) 2024-11-22T18:53:30,392 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42225-0x1014105ffa30000, quorum=127.0.0.1:56543, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T18:53:30,392 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42225-0x1014105ffa30000, quorum=127.0.0.1:56543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:53:30,392 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46653-0x1014105ffa30001, quorum=127.0.0.1:56543, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T18:53:30,392 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46653-0x1014105ffa30001, quorum=127.0.0.1:56543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:53:30,392 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T18:53:30,392 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-22T18:53:30,392 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T18:53:30,392 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T18:53:30,393 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): master:42225-0x1014105ffa30000, quorum=127.0.0.1:56543, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T18:53:30,393 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'd79ba0c344fb,46653,1732301559116' ***** 2024-11-22T18:53:30,393 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-22T18:53:30,393 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46653-0x1014105ffa30001, quorum=127.0.0.1:56543, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T18:53:30,393 INFO [RS:0;d79ba0c344fb:46653 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-22T18:53:30,393 INFO [RS:0;d79ba0c344fb:46653 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-22T18:53:30,393 INFO [RS:0;d79ba0c344fb:46653 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-22T18:53:30,393 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-22T18:53:30,393 INFO [RS:0;d79ba0c344fb:46653 {}] regionserver.HRegionServer(3091): Received CLOSE for 79fec7cb106296c409d286c6b19cf329 2024-11-22T18:53:30,393 INFO [RS:0;d79ba0c344fb:46653 {}] regionserver.HRegionServer(959): stopping server d79ba0c344fb,46653,1732301559116 2024-11-22T18:53:30,393 INFO [RS:0;d79ba0c344fb:46653 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T18:53:30,393 INFO [RS:0;d79ba0c344fb:46653 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;d79ba0c344fb:46653. 2024-11-22T18:53:30,393 DEBUG [RS:0;d79ba0c344fb:46653 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T18:53:30,393 DEBUG [RS:0;d79ba0c344fb:46653 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T18:53:30,394 INFO [RS:0;d79ba0c344fb:46653 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
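Note on the AsyncConnectionImpl(264) "Call stack:" entries above: when the connection is closed, the closing thread's own stack trace is captured and logged so the test output records which caller triggered the close (the first frame is always java.lang.Thread.getStackTrace). The following is a minimal, self-contained sketch of that logging pattern only; the names CloseTraceExample and currentCallStack are invented for the illustration and this is not the HBase source.

    import java.util.Arrays;
    import java.util.stream.Collectors;

    // Sketch only: reproduces the "Call stack: at ..." style of the log entries
    // above by snapshotting the current thread's stack at close time.
    final class CloseTraceExample {

        // Returns the caller's stack, one "at ..." line per frame, skipping the
        // getStackTrace() and currentCallStack() frames themselves.
        static String currentCallStack() {
            return Arrays.stream(Thread.currentThread().getStackTrace())
                    .skip(2)
                    .map(frame -> "at " + frame)
                    .collect(Collectors.joining(System.lineSeparator()));
        }

        public static void main(String[] args) {
            System.out.println("Connection has been closed by "
                    + Thread.currentThread().getName() + ".");
            System.out.println("Call stack:" + System.lineSeparator() + currentCallStack());
        }
    }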
2024-11-22T18:53:30,394 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 79fec7cb106296c409d286c6b19cf329, disabling compactions & flushes 2024-11-22T18:53:30,394 INFO [RS:0;d79ba0c344fb:46653 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-22T18:53:30,394 INFO [RS:0;d79ba0c344fb:46653 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-22T18:53:30,394 INFO [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732301560044.79fec7cb106296c409d286c6b19cf329. 2024-11-22T18:53:30,394 INFO [RS:0;d79ba0c344fb:46653 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-22T18:53:30,394 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732301560044.79fec7cb106296c409d286c6b19cf329. 2024-11-22T18:53:30,394 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732301560044.79fec7cb106296c409d286c6b19cf329. after waiting 0 ms 2024-11-22T18:53:30,394 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732301560044.79fec7cb106296c409d286c6b19cf329. 2024-11-22T18:53:30,394 INFO [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 79fec7cb106296c409d286c6b19cf329 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-22T18:53:30,394 INFO [RS:0;d79ba0c344fb:46653 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-22T18:53:30,394 DEBUG [RS:0;d79ba0c344fb:46653 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 79fec7cb106296c409d286c6b19cf329=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732301560044.79fec7cb106296c409d286c6b19cf329.} 2024-11-22T18:53:30,394 DEBUG [RS:0;d79ba0c344fb:46653 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 79fec7cb106296c409d286c6b19cf329 2024-11-22T18:53:30,394 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T18:53:30,394 INFO [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T18:53:30,394 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T18:53:30,394 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T18:53:30,394 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T18:53:30,394 INFO [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): 
Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-22T18:53:30,398 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/79fec7cb106296c409d286c6b19cf329/.tmp/info/49a76d3d1e544f40a45f615ea803a1ea is 1080, key is row0001/info:/1732301610379/Put/seqid=0 2024-11-22T18:53:30,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34905 is added to blk_1073741845_1021 (size=6033) 2024-11-22T18:53:30,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42649 is added to blk_1073741845_1021 (size=6033) 2024-11-22T18:53:30,403 INFO [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/79fec7cb106296c409d286c6b19cf329/.tmp/info/49a76d3d1e544f40a45f615ea803a1ea 2024-11-22T18:53:30,406 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:30,406 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T18:53:30,409 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/79fec7cb106296c409d286c6b19cf329/.tmp/info/49a76d3d1e544f40a45f615ea803a1ea as hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/79fec7cb106296c409d286c6b19cf329/info/49a76d3d1e544f40a45f615ea803a1ea 2024-11-22T18:53:30,411 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/hbase/meta/1588230740/.tmp/info/498e4999810147bd98c11caf0a197784 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732301560044.79fec7cb106296c409d286c6b19cf329./info:regioninfo/1732301560397/Put/seqid=0 2024-11-22T18:53:30,414 INFO [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/79fec7cb106296c409d286c6b19cf329/info/49a76d3d1e544f40a45f615ea803a1ea, entries=1, sequenceid=22, filesize=5.9 K 2024-11-22T18:53:30,415 INFO [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 79fec7cb106296c409d286c6b19cf329 in 21ms, sequenceid=22, compaction requested=true 2024-11-22T18:53:30,415 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732301560044.79fec7cb106296c409d286c6b19cf329.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/79fec7cb106296c409d286c6b19cf329/info/dba8b0aa3fed48269922f562971b7e39, hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/79fec7cb106296c409d286c6b19cf329/info/f95bcbfc86744cadb0ce0a798b9c2ac3, hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/79fec7cb106296c409d286c6b19cf329/info/f0bbc4eb13444304a5471aadd6eb308a] to archive 2024-11-22T18:53:30,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42649 is added to blk_1073741846_1022 (size=7308) 2024-11-22T18:53:30,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34905 is added to blk_1073741846_1022 (size=7308) 2024-11-22T18:53:30,416 INFO [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/hbase/meta/1588230740/.tmp/info/498e4999810147bd98c11caf0a197784 2024-11-22T18:53:30,416 DEBUG 
[StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732301560044.79fec7cb106296c409d286c6b19cf329.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-22T18:53:30,418 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732301560044.79fec7cb106296c409d286c6b19cf329.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/79fec7cb106296c409d286c6b19cf329/info/dba8b0aa3fed48269922f562971b7e39 to hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/79fec7cb106296c409d286c6b19cf329/info/dba8b0aa3fed48269922f562971b7e39 2024-11-22T18:53:30,419 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732301560044.79fec7cb106296c409d286c6b19cf329.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/79fec7cb106296c409d286c6b19cf329/info/f95bcbfc86744cadb0ce0a798b9c2ac3 to hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/79fec7cb106296c409d286c6b19cf329/info/f95bcbfc86744cadb0ce0a798b9c2ac3 2024-11-22T18:53:30,421 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732301560044.79fec7cb106296c409d286c6b19cf329.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/79fec7cb106296c409d286c6b19cf329/info/f0bbc4eb13444304a5471aadd6eb308a to hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/79fec7cb106296c409d286c6b19cf329/info/f0bbc4eb13444304a5471aadd6eb308a 2024-11-22T18:53:30,421 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732301560044.79fec7cb106296c409d286c6b19cf329.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=d79ba0c344fb:42225 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-22T18:53:30,421 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732301560044.79fec7cb106296c409d286c6b19cf329.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [dba8b0aa3fed48269922f562971b7e39=6033, f95bcbfc86744cadb0ce0a798b9c2ac3=6033, f0bbc4eb13444304a5471aadd6eb308a=6033] 2024-11-22T18:53:30,425 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/79fec7cb106296c409d286c6b19cf329/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-22T18:53:30,426 INFO [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732301560044.79fec7cb106296c409d286c6b19cf329. 2024-11-22T18:53:30,426 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 79fec7cb106296c409d286c6b19cf329: Waiting for close lock at 1732301610393Running coprocessor pre-close hooks at 1732301610393Disabling compacts and flushes for region at 1732301610393Disabling writes for close at 1732301610394 (+1 ms)Obtaining lock to block concurrent updates at 1732301610394Preparing flush snapshotting stores in 79fec7cb106296c409d286c6b19cf329 at 1732301610394Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732301560044.79fec7cb106296c409d286c6b19cf329., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1732301610394Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732301560044.79fec7cb106296c409d286c6b19cf329. at 1732301610395 (+1 ms)Flushing 79fec7cb106296c409d286c6b19cf329/info: creating writer at 1732301610395Flushing 79fec7cb106296c409d286c6b19cf329/info: appending metadata at 1732301610397 (+2 ms)Flushing 79fec7cb106296c409d286c6b19cf329/info: closing flushed file at 1732301610397Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@38ea21fc: reopening flushed file at 1732301610408 (+11 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 79fec7cb106296c409d286c6b19cf329 in 21ms, sequenceid=22, compaction requested=true at 1732301610415 (+7 ms)Writing region close event to WAL at 1732301610422 (+7 ms)Running coprocessor post-close hooks at 1732301610425 (+3 ms)Closed at 1732301610426 (+1 ms) 2024-11-22T18:53:30,426 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732301560044.79fec7cb106296c409d286c6b19cf329. 
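The recurring RecoverLeaseFSUtils(258) warnings above all have the same shape: isFileClosed is invoked through reflection, Method.invoke wraps whatever the target throws in an InvocationTargetException, and the real error ("Filesystem closed", thrown by DFSClient.checkOpen once the mini-cluster's HDFS client has shut down) is only visible as the cause. Below is a sketch of that probe-and-unwrap pattern, assuming a FileSystem handle and WAL path supplied by the caller; IsFileClosedProbe is an invented name and this is not the RecoverLeaseFSUtils source.

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Sketch only: probe whether an HDFS file is closed via reflection, the way
    // the stack traces above show, and surface the real cause when it fails.
    final class IsFileClosedProbe {

        // Returns true only if the file system exposes isFileClosed(Path) and it
        // reports the file as closed; any failure is logged and treated as false.
        static boolean isFileClosed(FileSystem fs, Path wal) {
            try {
                // DistributedFileSystem has a public isFileClosed(Path); looking it
                // up reflectively lets the caller compile against plain FileSystem.
                Method m = fs.getClass().getMethod("isFileClosed", Path.class);
                return (Boolean) m.invoke(fs, wal);
            } catch (NoSuchMethodException | IllegalAccessException e) {
                return false; // no such probe on this FileSystem implementation
            } catch (InvocationTargetException e) {
                // Method.invoke wraps the target's exception; the interesting error
                // (e.g. java.io.IOException: Filesystem closed) is the cause.
                System.err.println("Failed invocation for " + wal + ": " + e.getCause());
                return false;
            }
        }
    }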
2024-11-22T18:53:30,436 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/hbase/meta/1588230740/.tmp/ns/4b335a4cb5a940f9adbcc1f8618b7023 is 43, key is default/ns:d/1732301559970/Put/seqid=0 2024-11-22T18:53:30,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42649 is added to blk_1073741847_1023 (size=5153) 2024-11-22T18:53:30,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34905 is added to blk_1073741847_1023 (size=5153) 2024-11-22T18:53:30,441 INFO [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/hbase/meta/1588230740/.tmp/ns/4b335a4cb5a940f9adbcc1f8618b7023 2024-11-22T18:53:30,465 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/hbase/meta/1588230740/.tmp/table/cf0a11748fb544369470ccd450dfcd8a is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1732301560409/Put/seqid=0 2024-11-22T18:53:30,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42649 is added to blk_1073741848_1024 (size=5508) 2024-11-22T18:53:30,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34905 is added to blk_1073741848_1024 (size=5508) 2024-11-22T18:53:30,470 INFO [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/hbase/meta/1588230740/.tmp/table/cf0a11748fb544369470ccd450dfcd8a 2024-11-22T18:53:30,476 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/hbase/meta/1588230740/.tmp/info/498e4999810147bd98c11caf0a197784 as hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/hbase/meta/1588230740/info/498e4999810147bd98c11caf0a197784 2024-11-22T18:53:30,482 INFO [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/hbase/meta/1588230740/info/498e4999810147bd98c11caf0a197784, entries=10, sequenceid=11, filesize=7.1 K 2024-11-22T18:53:30,482 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/hbase/meta/1588230740/.tmp/ns/4b335a4cb5a940f9adbcc1f8618b7023 as hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/hbase/meta/1588230740/ns/4b335a4cb5a940f9adbcc1f8618b7023 2024-11-22T18:53:30,487 INFO [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/hbase/meta/1588230740/ns/4b335a4cb5a940f9adbcc1f8618b7023, entries=2, sequenceid=11, filesize=5.0 K 2024-11-22T18:53:30,488 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/hbase/meta/1588230740/.tmp/table/cf0a11748fb544369470ccd450dfcd8a as hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/hbase/meta/1588230740/table/cf0a11748fb544369470ccd450dfcd8a 2024-11-22T18:53:30,493 INFO [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/hbase/meta/1588230740/table/cf0a11748fb544369470ccd450dfcd8a, entries=2, sequenceid=11, filesize=5.4 K 2024-11-22T18:53:30,494 INFO [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 100ms, sequenceid=11, compaction requested=false 2024-11-22T18:53:30,499 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-22T18:53:30,499 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T18:53:30,499 INFO [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T18:53:30,499 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732301610394Running coprocessor pre-close hooks at 1732301610394Disabling compacts and flushes for region at 1732301610394Disabling writes for close at 1732301610394Obtaining lock to block concurrent updates at 1732301610394Preparing flush snapshotting stores in 1588230740 at 1732301610394Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1732301610394Flushing stores of hbase:meta,,1.1588230740 at 1732301610395 (+1 ms)Flushing 1588230740/info: creating writer at 1732301610395Flushing 1588230740/info: appending metadata at 1732301610411 (+16 ms)Flushing 1588230740/info: closing flushed file at 1732301610411Flushing 1588230740/ns: creating writer at 1732301610422 (+11 ms)Flushing 1588230740/ns: appending metadata at 1732301610436 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1732301610436Flushing 1588230740/table: creating writer at 1732301610446 (+10 ms)Flushing 1588230740/table: appending metadata at 1732301610464 (+18 ms)Flushing 1588230740/table: closing flushed file at 1732301610464Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6399ae0c: reopening flushed file at 1732301610475 (+11 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2b99ff9: reopening flushed file at 1732301610482 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@23de781e: reopening flushed file at 1732301610487 (+5 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 100ms, sequenceid=11, compaction requested=false at 1732301610494 (+7 ms)Writing region close event to WAL at 1732301610495 (+1 ms)Running coprocessor post-close hooks at 1732301610499 (+4 ms)Closed at 1732301610499 2024-11-22T18:53:30,500 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-22T18:53:30,594 INFO [RS:0;d79ba0c344fb:46653 {}] regionserver.HRegionServer(976): stopping server d79ba0c344fb,46653,1732301559116; all regions closed. 2024-11-22T18:53:30,595 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:53:30,595 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:53:30,595 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:53:30,595 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:53:30,595 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:53:30,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34905 is added to blk_1073741834_1010 (size=3306) 2024-11-22T18:53:30,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42649 is added to blk_1073741834_1010 (size=3306) 2024-11-22T18:53:30,600 DEBUG [RS:0;d79ba0c344fb:46653 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/oldWALs 2024-11-22T18:53:30,600 INFO [RS:0;d79ba0c344fb:46653 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog d79ba0c344fb%2C46653%2C1732301559116.meta:.meta(num 1732301559929) 2024-11-22T18:53:30,601 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:53:30,601 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:53:30,601 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:53:30,601 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:53:30,601 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:53:30,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34905 is added to blk_1073741844_1020 (size=1252) 2024-11-22T18:53:30,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42649 is added to blk_1073741844_1020 (size=1252) 2024-11-22T18:53:30,606 DEBUG [RS:0;d79ba0c344fb:46653 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/oldWALs 2024-11-22T18:53:30,606 INFO [RS:0;d79ba0c344fb:46653 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog d79ba0c344fb%2C46653%2C1732301559116:(num 1732301610380) 2024-11-22T18:53:30,606 DEBUG [RS:0;d79ba0c344fb:46653 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T18:53:30,606 INFO [RS:0;d79ba0c344fb:46653 {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T18:53:30,606 INFO [RS:0;d79ba0c344fb:46653 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T18:53:30,607 INFO [RS:0;d79ba0c344fb:46653 {}] hbase.ChoreService(370): Chore service for: regionserver/d79ba0c344fb:0 had 
[ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-22T18:53:30,607 INFO [RS:0;d79ba0c344fb:46653 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T18:53:30,607 INFO [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-22T18:53:30,607 INFO [RS:0;d79ba0c344fb:46653 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46653 2024-11-22T18:53:30,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46653-0x1014105ffa30001, quorum=127.0.0.1:56543, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/d79ba0c344fb,46653,1732301559116 2024-11-22T18:53:30,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42225-0x1014105ffa30000, quorum=127.0.0.1:56543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T18:53:30,610 INFO [RS:0;d79ba0c344fb:46653 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T18:53:30,611 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [d79ba0c344fb,46653,1732301559116] 2024-11-22T18:53:30,613 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/d79ba0c344fb,46653,1732301559116 already deleted, retry=false 2024-11-22T18:53:30,613 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; d79ba0c344fb,46653,1732301559116 expired; onlineServers=0 2024-11-22T18:53:30,613 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'd79ba0c344fb,42225,1732301559072' ***** 2024-11-22T18:53:30,613 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-22T18:53:30,613 INFO [M:0;d79ba0c344fb:42225 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T18:53:30,613 INFO [M:0;d79ba0c344fb:42225 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T18:53:30,613 DEBUG [M:0;d79ba0c344fb:42225 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-22T18:53:30,613 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
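The RegionServerTracker lines above ("RegionServer ephemeral node deleted, processing expiration") rely on ZooKeeper ephemeral nodes: the region server's registration znode disappears as soon as its ZooKeeper session ends, and the master's watcher turns the resulting NodeDeleted event into server expiration. A compact sketch of that mechanism with the plain ZooKeeper client follows; the quorum address 127.0.0.1:2181 and the znode name are placeholders, a running ZooKeeper ensemble is assumed, and this is not the HBase tracker code.

    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.TimeUnit;

    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    // Sketch only: one session registers an ephemeral node, another watches it
    // and treats NodeDeleted as "that server has expired".
    final class EphemeralExpirationDemo {

        private static ZooKeeper connect(String quorum) throws Exception {
            CountDownLatch connected = new CountDownLatch(1);
            ZooKeeper zk = new ZooKeeper(quorum, 30_000, event -> {
                if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
                    connected.countDown();
                }
            });
            connected.await(10, TimeUnit.SECONDS);
            return zk;
        }

        public static void main(String[] args) throws Exception {
            String quorum = "127.0.0.1:2181";                       // placeholder quorum
            String rsZnode = "/demo-rs-" + System.currentTimeMillis();

            ZooKeeper tracker = connect(quorum);    // plays the master's tracker
            ZooKeeper rsSession = connect(quorum);  // plays the region server's session

            // The "region server" registers itself with an ephemeral node.
            rsSession.create(rsZnode, new byte[0],
                    ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

            // The "master" watches that node; NodeDeleted means the server expired.
            CountDownLatch expired = new CountDownLatch(1);
            tracker.exists(rsZnode, event -> {
                if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
                    System.out.println("ephemeral node deleted, processing expiration: "
                            + event.getPath());
                    expired.countDown();
                }
            });

            rsSession.close();                      // session ends, ZooKeeper deletes the node
            expired.await(10, TimeUnit.SECONDS);
            tracker.close();
        }
    }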
2024-11-22T18:53:30,613 DEBUG [M:0;d79ba0c344fb:42225 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-22T18:53:30,613 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster-HFileCleaner.large.0-1732301559304 {}] cleaner.HFileCleaner(306): Exit Thread[master/d79ba0c344fb:0:becomeActiveMaster-HFileCleaner.large.0-1732301559304,5,FailOnTimeoutGroup] 2024-11-22T18:53:30,613 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster-HFileCleaner.small.0-1732301559305 {}] cleaner.HFileCleaner(306): Exit Thread[master/d79ba0c344fb:0:becomeActiveMaster-HFileCleaner.small.0-1732301559305,5,FailOnTimeoutGroup] 2024-11-22T18:53:30,613 INFO [M:0;d79ba0c344fb:42225 {}] hbase.ChoreService(370): Chore service for: master/d79ba0c344fb:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-22T18:53:30,613 INFO [M:0;d79ba0c344fb:42225 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T18:53:30,614 DEBUG [M:0;d79ba0c344fb:42225 {}] master.HMaster(1795): Stopping service threads 2024-11-22T18:53:30,614 INFO [M:0;d79ba0c344fb:42225 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-22T18:53:30,614 INFO [M:0;d79ba0c344fb:42225 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T18:53:30,614 INFO [M:0;d79ba0c344fb:42225 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-22T18:53:30,614 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-22T18:53:30,617 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42225-0x1014105ffa30000, quorum=127.0.0.1:56543, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-22T18:53:30,617 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42225-0x1014105ffa30000, quorum=127.0.0.1:56543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:53:30,617 DEBUG [M:0;d79ba0c344fb:42225 {}] zookeeper.ZKUtil(347): master:42225-0x1014105ffa30000, quorum=127.0.0.1:56543, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-22T18:53:30,617 WARN [M:0;d79ba0c344fb:42225 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-22T18:53:30,617 INFO [M:0;d79ba0c344fb:42225 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/.lastflushedseqids 2024-11-22T18:53:30,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34905 is added to blk_1073741849_1025 (size=130) 2024-11-22T18:53:30,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42649 is added to blk_1073741849_1025 (size=130) 2024-11-22T18:53:30,625 INFO [M:0;d79ba0c344fb:42225 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-22T18:53:30,625 INFO [M:0;d79ba0c344fb:42225 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-22T18:53:30,625 DEBUG [M:0;d79ba0c344fb:42225 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T18:53:30,625 INFO [M:0;d79ba0c344fb:42225 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T18:53:30,625 DEBUG [M:0;d79ba0c344fb:42225 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T18:53:30,625 DEBUG [M:0;d79ba0c344fb:42225 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T18:53:30,625 DEBUG [M:0;d79ba0c344fb:42225 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T18:53:30,625 INFO [M:0;d79ba0c344fb:42225 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.59 KB heapSize=55 KB 2024-11-22T18:53:30,643 DEBUG [M:0;d79ba0c344fb:42225 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5b10f7dedf984a0ab4c38bd218e668ce is 82, key is hbase:meta,,1/info:regioninfo/1732301559955/Put/seqid=0 2024-11-22T18:53:30,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34905 is added to blk_1073741850_1026 (size=5672) 2024-11-22T18:53:30,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42649 is added to blk_1073741850_1026 (size=5672) 2024-11-22T18:53:30,649 INFO [M:0;d79ba0c344fb:42225 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5b10f7dedf984a0ab4c38bd218e668ce 2024-11-22T18:53:30,671 DEBUG [M:0;d79ba0c344fb:42225 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/05336e9a2a194fc895ffabc7e32a5c8c is 798, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732301560412/Put/seqid=0 2024-11-22T18:53:30,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42649 is added to blk_1073741851_1027 (size=7823) 2024-11-22T18:53:30,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34905 is added to blk_1073741851_1027 (size=7823) 2024-11-22T18:53:30,677 INFO [M:0;d79ba0c344fb:42225 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.99 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/05336e9a2a194fc895ffabc7e32a5c8c 2024-11-22T18:53:30,682 INFO [M:0;d79ba0c344fb:42225 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 05336e9a2a194fc895ffabc7e32a5c8c 2024-11-22T18:53:30,698 DEBUG [M:0;d79ba0c344fb:42225 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1b13829de2cf44fa8ca6c505d6932baf is 69, key is d79ba0c344fb,46653,1732301559116/rs:state/1732301559359/Put/seqid=0 2024-11-22T18:53:30,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34905 is added to blk_1073741852_1028 (size=5156) 2024-11-22T18:53:30,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42649 is added to blk_1073741852_1028 (size=5156) 2024-11-22T18:53:30,707 INFO [M:0;d79ba0c344fb:42225 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1b13829de2cf44fa8ca6c505d6932baf 2024-11-22T18:53:30,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46653-0x1014105ffa30001, quorum=127.0.0.1:56543, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T18:53:30,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46653-0x1014105ffa30001, quorum=127.0.0.1:56543, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T18:53:30,712 INFO [RS:0;d79ba0c344fb:46653 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T18:53:30,712 INFO [RS:0;d79ba0c344fb:46653 {}] regionserver.HRegionServer(1031): Exiting; stopping=d79ba0c344fb,46653,1732301559116; zookeeper connection closed. 2024-11-22T18:53:30,712 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1cb20d40 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1cb20d40 2024-11-22T18:53:30,712 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-22T18:53:30,730 DEBUG [M:0;d79ba0c344fb:42225 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ced52731fda34cbe98c19935a2285a14 is 52, key is load_balancer_on/state:d/1732301560041/Put/seqid=0 2024-11-22T18:53:30,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34905 is added to blk_1073741853_1029 (size=5056) 2024-11-22T18:53:30,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42649 is added to blk_1073741853_1029 (size=5056) 2024-11-22T18:53:30,737 INFO [M:0;d79ba0c344fb:42225 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ced52731fda34cbe98c19935a2285a14 2024-11-22T18:53:30,743 DEBUG [M:0;d79ba0c344fb:42225 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5b10f7dedf984a0ab4c38bd218e668ce as 
hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5b10f7dedf984a0ab4c38bd218e668ce 2024-11-22T18:53:30,748 INFO [M:0;d79ba0c344fb:42225 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5b10f7dedf984a0ab4c38bd218e668ce, entries=8, sequenceid=121, filesize=5.5 K 2024-11-22T18:53:30,749 DEBUG [M:0;d79ba0c344fb:42225 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/05336e9a2a194fc895ffabc7e32a5c8c as hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/05336e9a2a194fc895ffabc7e32a5c8c 2024-11-22T18:53:30,755 INFO [M:0;d79ba0c344fb:42225 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 05336e9a2a194fc895ffabc7e32a5c8c 2024-11-22T18:53:30,755 INFO [M:0;d79ba0c344fb:42225 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/05336e9a2a194fc895ffabc7e32a5c8c, entries=14, sequenceid=121, filesize=7.6 K 2024-11-22T18:53:30,756 DEBUG [M:0;d79ba0c344fb:42225 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1b13829de2cf44fa8ca6c505d6932baf as hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1b13829de2cf44fa8ca6c505d6932baf 2024-11-22T18:53:30,762 INFO [M:0;d79ba0c344fb:42225 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1b13829de2cf44fa8ca6c505d6932baf, entries=1, sequenceid=121, filesize=5.0 K 2024-11-22T18:53:30,763 DEBUG [M:0;d79ba0c344fb:42225 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ced52731fda34cbe98c19935a2285a14 as hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/ced52731fda34cbe98c19935a2285a14 2024-11-22T18:53:30,769 INFO [M:0;d79ba0c344fb:42225 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42317/user/jenkins/test-data/14c61a98-43cc-9da1-410e-e66058cb279d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/ced52731fda34cbe98c19935a2285a14, entries=1, sequenceid=121, filesize=4.9 K 2024-11-22T18:53:30,771 INFO [M:0;d79ba0c344fb:42225 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.59 KB/44638, heapSize ~54.94 KB/56256, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 145ms, sequenceid=121, compaction requested=false 2024-11-22T18:53:30,789 INFO [M:0;d79ba0c344fb:42225 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-22T18:53:30,789 DEBUG [M:0;d79ba0c344fb:42225 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732301610625Disabling compacts and flushes for region at 1732301610625Disabling writes for close at 1732301610625Obtaining lock to block concurrent updates at 1732301610625Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732301610625Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44638, getHeapSize=56256, getOffHeapSize=0, getCellsCount=140 at 1732301610626 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732301610626Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732301610627 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732301610643 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732301610643Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732301610654 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732301610670 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732301610670Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732301610682 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732301610697 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732301610697Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732301610713 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732301610730 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732301610730Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@76c1bc09: reopening flushed file at 1732301610742 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@79b055b4: reopening flushed file at 1732301610748 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@658cc006: reopening flushed file at 1732301610755 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3451b091: reopening flushed file at 1732301610762 (+7 ms)Finished flush of dataSize ~43.59 KB/44638, heapSize ~54.94 KB/56256, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 145ms, sequenceid=121, compaction requested=false at 1732301610771 (+9 ms)Writing region close event to WAL at 1732301610789 (+18 ms)Closed at 1732301610789 2024-11-22T18:53:30,790 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:53:30,790 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:53:30,790 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:53:30,790 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:53:30,790 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:53:30,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42649 is added to blk_1073741830_1006 (size=53035) 2024-11-22T18:53:30,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34905 is added to blk_1073741830_1006 (size=53035) 2024-11-22T18:53:30,794 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-22T18:53:30,794 INFO [M:0;d79ba0c344fb:42225 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-22T18:53:30,794 INFO [M:0;d79ba0c344fb:42225 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42225 2024-11-22T18:53:30,794 INFO [M:0;d79ba0c344fb:42225 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T18:53:30,896 INFO [M:0;d79ba0c344fb:42225 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T18:53:30,896 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42225-0x1014105ffa30000, quorum=127.0.0.1:56543, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T18:53:30,896 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42225-0x1014105ffa30000, quorum=127.0.0.1:56543, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T18:53:30,899 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@76b2d62a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T18:53:30,900 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@40c8737d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T18:53:30,900 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T18:53:30,900 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@67c2b9b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T18:53:30,900 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@16aeea80{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5b0f5c40-b18a-bc60-0f18-bf97c927e44f/hadoop.log.dir/,STOPPED} 2024-11-22T18:53:30,902 WARN [BP-1599718698-172.17.0.2-1732301558309 heartbeating to localhost/127.0.0.1:42317 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T18:53:30,902 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T18:53:30,902 WARN [BP-1599718698-172.17.0.2-1732301558309 heartbeating to localhost/127.0.0.1:42317 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1599718698-172.17.0.2-1732301558309 (Datanode Uuid 8aa88aa0-6399-4715-9ca7-4e3c1a2cdc84) service to localhost/127.0.0.1:42317 2024-11-22T18:53:30,902 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T18:53:30,902 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5b0f5c40-b18a-bc60-0f18-bf97c927e44f/cluster_b6c4e7e8-fe57-3337-aa1e-9f24d9be3a48/data/data3/current/BP-1599718698-172.17.0.2-1732301558309 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T18:53:30,903 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5b0f5c40-b18a-bc60-0f18-bf97c927e44f/cluster_b6c4e7e8-fe57-3337-aa1e-9f24d9be3a48/data/data4/current/BP-1599718698-172.17.0.2-1732301558309 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T18:53:30,903 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T18:53:30,905 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4ff27683{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T18:53:30,906 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@38f5461{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T18:53:30,906 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T18:53:30,906 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a5f2a0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T18:53:30,906 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fa328f4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5b0f5c40-b18a-bc60-0f18-bf97c927e44f/hadoop.log.dir/,STOPPED} 2024-11-22T18:53:30,909 WARN [BP-1599718698-172.17.0.2-1732301558309 heartbeating to localhost/127.0.0.1:42317 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T18:53:30,909 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T18:53:30,909 WARN [BP-1599718698-172.17.0.2-1732301558309 heartbeating to localhost/127.0.0.1:42317 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1599718698-172.17.0.2-1732301558309 (Datanode Uuid 94a7c4de-dd99-4213-8ebf-a7f55b69c462) service to localhost/127.0.0.1:42317 2024-11-22T18:53:30,909 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T18:53:30,909 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5b0f5c40-b18a-bc60-0f18-bf97c927e44f/cluster_b6c4e7e8-fe57-3337-aa1e-9f24d9be3a48/data/data1/current/BP-1599718698-172.17.0.2-1732301558309 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T18:53:30,910 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5b0f5c40-b18a-bc60-0f18-bf97c927e44f/cluster_b6c4e7e8-fe57-3337-aa1e-9f24d9be3a48/data/data2/current/BP-1599718698-172.17.0.2-1732301558309 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T18:53:30,910 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T18:53:30,919 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@48b18cf{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T18:53:30,920 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@30eae670{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T18:53:30,920 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T18:53:30,920 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7638bdc6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T18:53:30,920 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5bbcc3bf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5b0f5c40-b18a-bc60-0f18-bf97c927e44f/hadoop.log.dir/,STOPPED} 2024-11-22T18:53:30,927 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-22T18:53:30,946 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-22T18:53:30,958 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=206 (was 180) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42317 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:42317 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42317 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:42317 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42317 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42317 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42317 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42317 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/d79ba0c344fb:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) - Thread LEAK? -, OpenFileDescriptor=481 (was 455) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=99 (was 117), ProcessCount=11 (was 11), AvailableMemoryMB=7578 (was 7964) 2024-11-22T18:53:30,968 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=206, OpenFileDescriptor=481, MaxFileDescriptor=1048576, SystemLoadAverage=99, ProcessCount=11, AvailableMemoryMB=7578 2024-11-22T18:53:30,968 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-22T18:53:30,968 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5b0f5c40-b18a-bc60-0f18-bf97c927e44f/hadoop.log.dir so I do NOT create it in target/test-data/43cfde25-2a66-b21f-b86f-ce7e62e43700 2024-11-22T18:53:30,968 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5b0f5c40-b18a-bc60-0f18-bf97c927e44f/hadoop.tmp.dir so I do NOT create it in target/test-data/43cfde25-2a66-b21f-b86f-ce7e62e43700 2024-11-22T18:53:30,968 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/43cfde25-2a66-b21f-b86f-ce7e62e43700/cluster_e4920b2c-f78f-34f5-7ef0-b819df0e3aef, deleteOnExit=true 2024-11-22T18:53:30,968 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-22T18:53:30,968 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/43cfde25-2a66-b21f-b86f-ce7e62e43700/test.cache.data in system properties and HBase conf 2024-11-22T18:53:30,969 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/43cfde25-2a66-b21f-b86f-ce7e62e43700/hadoop.tmp.dir in system properties and HBase conf 2024-11-22T18:53:30,969 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/43cfde25-2a66-b21f-b86f-ce7e62e43700/hadoop.log.dir in system properties and HBase conf 2024-11-22T18:53:30,969 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/43cfde25-2a66-b21f-b86f-ce7e62e43700/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-22T18:53:30,969 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/43cfde25-2a66-b21f-b86f-ce7e62e43700/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-22T18:53:30,969 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-22T18:53:30,969 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-22T18:53:30,969 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/43cfde25-2a66-b21f-b86f-ce7e62e43700/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-22T18:53:30,969 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/43cfde25-2a66-b21f-b86f-ce7e62e43700/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-22T18:53:30,969 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/43cfde25-2a66-b21f-b86f-ce7e62e43700/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-22T18:53:30,969 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/43cfde25-2a66-b21f-b86f-ce7e62e43700/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T18:53:30,969 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/43cfde25-2a66-b21f-b86f-ce7e62e43700/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-22T18:53:30,969 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/43cfde25-2a66-b21f-b86f-ce7e62e43700/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-22T18:53:30,969 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/43cfde25-2a66-b21f-b86f-ce7e62e43700/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T18:53:30,969 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/43cfde25-2a66-b21f-b86f-ce7e62e43700/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T18:53:30,969 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/43cfde25-2a66-b21f-b86f-ce7e62e43700/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-22T18:53:30,969 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/43cfde25-2a66-b21f-b86f-ce7e62e43700/nfs.dump.dir in system properties and HBase conf 2024-11-22T18:53:30,970 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/43cfde25-2a66-b21f-b86f-ce7e62e43700/java.io.tmpdir in system properties and HBase conf 2024-11-22T18:53:30,970 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/43cfde25-2a66-b21f-b86f-ce7e62e43700/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T18:53:30,970 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/43cfde25-2a66-b21f-b86f-ce7e62e43700/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-22T18:53:30,970 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/43cfde25-2a66-b21f-b86f-ce7e62e43700/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-22T18:53:30,984 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T18:53:31,055 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T18:53:31,058 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T18:53:31,060 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T18:53:31,060 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T18:53:31,060 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T18:53:31,060 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T18:53:31,061 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@75ee0a96{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/43cfde25-2a66-b21f-b86f-ce7e62e43700/hadoop.log.dir/,AVAILABLE} 2024-11-22T18:53:31,061 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2a933e33{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T18:53:31,190 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@38dc0fd7{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/43cfde25-2a66-b21f-b86f-ce7e62e43700/java.io.tmpdir/jetty-localhost-39627-hadoop-hdfs-3_4_1-tests_jar-_-any-9563431775069087052/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T18:53:31,190 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43bec0c2{HTTP/1.1, (http/1.1)}{localhost:39627} 2024-11-22T18:53:31,191 INFO [Time-limited test {}] server.Server(415): Started @236580ms 2024-11-22T18:53:31,205 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T18:53:31,294 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T18:53:31,297 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T18:53:31,299 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T18:53:31,299 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T18:53:31,300 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T18:53:31,300 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47b62c1f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/43cfde25-2a66-b21f-b86f-ce7e62e43700/hadoop.log.dir/,AVAILABLE} 2024-11-22T18:53:31,301 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6bb90d86{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T18:53:31,382 INFO [regionserver/d79ba0c344fb:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T18:53:31,406 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:31,406 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:31,454 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2124b505{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/43cfde25-2a66-b21f-b86f-ce7e62e43700/java.io.tmpdir/jetty-localhost-40495-hadoop-hdfs-3_4_1-tests_jar-_-any-12830435569936409158/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T18:53:31,454 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@285e4e67{HTTP/1.1, (http/1.1)}{localhost:40495} 2024-11-22T18:53:31,454 INFO [Time-limited test {}] server.Server(415): Started @236843ms 2024-11-22T18:53:31,455 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-22T18:53:31,494 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T18:53:31,496 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T18:53:31,498 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T18:53:31,498 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T18:53:31,498 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T18:53:31,499 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@277e18bc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/43cfde25-2a66-b21f-b86f-ce7e62e43700/hadoop.log.dir/,AVAILABLE} 2024-11-22T18:53:31,499 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@15c8c411{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T18:53:31,557 WARN [Thread-1960 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/43cfde25-2a66-b21f-b86f-ce7e62e43700/cluster_e4920b2c-f78f-34f5-7ef0-b819df0e3aef/data/data1/current/BP-736412378-172.17.0.2-1732301610992/current, will proceed with Du for space computation calculation, 2024-11-22T18:53:31,557 WARN [Thread-1961 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/43cfde25-2a66-b21f-b86f-ce7e62e43700/cluster_e4920b2c-f78f-34f5-7ef0-b819df0e3aef/data/data2/current/BP-736412378-172.17.0.2-1732301610992/current, will proceed with Du for space computation calculation, 2024-11-22T18:53:31,584 WARN [Thread-1939 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T18:53:31,586 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd7a819d44790aece with lease ID 0xf6faa1b6ea78f0b2: Processing first storage report for DS-cfd48fe6-ed08-42b3-b377-25165a6c828b from datanode DatanodeRegistration(127.0.0.1:33065, datanodeUuid=da17622b-a6b7-407a-bf5b-666b80470297, infoPort=45179, infoSecurePort=0, ipcPort=35827, storageInfo=lv=-57;cid=testClusterID;nsid=1230341304;c=1732301610992) 2024-11-22T18:53:31,586 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd7a819d44790aece with lease ID 0xf6faa1b6ea78f0b2: from storage DS-cfd48fe6-ed08-42b3-b377-25165a6c828b node DatanodeRegistration(127.0.0.1:33065, datanodeUuid=da17622b-a6b7-407a-bf5b-666b80470297, infoPort=45179, infoSecurePort=0, ipcPort=35827, storageInfo=lv=-57;cid=testClusterID;nsid=1230341304;c=1732301610992), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T18:53:31,586 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd7a819d44790aece with lease ID 0xf6faa1b6ea78f0b2: Processing first storage report for DS-9248b290-6fed-407f-8099-402053658a1a from datanode DatanodeRegistration(127.0.0.1:33065, datanodeUuid=da17622b-a6b7-407a-bf5b-666b80470297, infoPort=45179, infoSecurePort=0, ipcPort=35827, storageInfo=lv=-57;cid=testClusterID;nsid=1230341304;c=1732301610992) 2024-11-22T18:53:31,586 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd7a819d44790aece with lease ID 0xf6faa1b6ea78f0b2: from storage DS-9248b290-6fed-407f-8099-402053658a1a node DatanodeRegistration(127.0.0.1:33065, datanodeUuid=da17622b-a6b7-407a-bf5b-666b80470297, infoPort=45179, infoSecurePort=0, ipcPort=35827, storageInfo=lv=-57;cid=testClusterID;nsid=1230341304;c=1732301610992), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T18:53:31,632 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@f63b03b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/43cfde25-2a66-b21f-b86f-ce7e62e43700/java.io.tmpdir/jetty-localhost-38209-hadoop-hdfs-3_4_1-tests_jar-_-any-5264307303819159835/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T18:53:31,632 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7f1304aa{HTTP/1.1, (http/1.1)}{localhost:38209} 2024-11-22T18:53:31,632 INFO [Time-limited test {}] server.Server(415): Started @237022ms 2024-11-22T18:53:31,634 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-22T18:53:31,760 WARN [Thread-1986 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/43cfde25-2a66-b21f-b86f-ce7e62e43700/cluster_e4920b2c-f78f-34f5-7ef0-b819df0e3aef/data/data3/current/BP-736412378-172.17.0.2-1732301610992/current, will proceed with Du for space computation calculation, 2024-11-22T18:53:31,760 WARN [Thread-1987 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/43cfde25-2a66-b21f-b86f-ce7e62e43700/cluster_e4920b2c-f78f-34f5-7ef0-b819df0e3aef/data/data4/current/BP-736412378-172.17.0.2-1732301610992/current, will proceed with Du for space computation calculation, 2024-11-22T18:53:31,783 WARN [Thread-1975 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-22T18:53:31,786 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7cd0465d3376e805 with lease ID 0xf6faa1b6ea78f0b3: Processing first storage report for DS-f9bf92a4-5e53-4d43-b73f-ba72a8553064 from datanode DatanodeRegistration(127.0.0.1:38581, datanodeUuid=54919de8-122d-4a60-9ad5-271a06ea772a, infoPort=33757, infoSecurePort=0, ipcPort=36081, storageInfo=lv=-57;cid=testClusterID;nsid=1230341304;c=1732301610992) 2024-11-22T18:53:31,786 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7cd0465d3376e805 with lease ID 0xf6faa1b6ea78f0b3: from storage DS-f9bf92a4-5e53-4d43-b73f-ba72a8553064 node DatanodeRegistration(127.0.0.1:38581, datanodeUuid=54919de8-122d-4a60-9ad5-271a06ea772a, infoPort=33757, infoSecurePort=0, ipcPort=36081, storageInfo=lv=-57;cid=testClusterID;nsid=1230341304;c=1732301610992), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T18:53:31,786 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7cd0465d3376e805 with lease ID 0xf6faa1b6ea78f0b3: Processing first storage report for DS-81a20983-741b-4e87-ad80-be4afa15562f from datanode DatanodeRegistration(127.0.0.1:38581, datanodeUuid=54919de8-122d-4a60-9ad5-271a06ea772a, infoPort=33757, infoSecurePort=0, ipcPort=36081, storageInfo=lv=-57;cid=testClusterID;nsid=1230341304;c=1732301610992) 2024-11-22T18:53:31,787 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7cd0465d3376e805 with lease ID 0xf6faa1b6ea78f0b3: from storage DS-81a20983-741b-4e87-ad80-be4afa15562f node DatanodeRegistration(127.0.0.1:38581, datanodeUuid=54919de8-122d-4a60-9ad5-271a06ea772a, infoPort=33757, infoSecurePort=0, ipcPort=36081, storageInfo=lv=-57;cid=testClusterID;nsid=1230341304;c=1732301610992), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T18:53:31,860 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/43cfde25-2a66-b21f-b86f-ce7e62e43700 2024-11-22T18:53:31,863 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/43cfde25-2a66-b21f-b86f-ce7e62e43700/cluster_e4920b2c-f78f-34f5-7ef0-b819df0e3aef/zookeeper_0, clientPort=55954, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/43cfde25-2a66-b21f-b86f-ce7e62e43700/cluster_e4920b2c-f78f-34f5-7ef0-b819df0e3aef/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/43cfde25-2a66-b21f-b86f-ce7e62e43700/cluster_e4920b2c-f78f-34f5-7ef0-b819df0e3aef/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-22T18:53:31,864 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55954 2024-11-22T18:53:31,865 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T18:53:31,867 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T18:53:31,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741825_1001 (size=7) 2024-11-22T18:53:31,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741825_1001 (size=7) 2024-11-22T18:53:31,879 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993 with version=8 2024-11-22T18:53:31,879 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/hbase-staging 2024-11-22T18:53:31,881 INFO [Time-limited test {}] client.ConnectionUtils(128): master/d79ba0c344fb:0 server-side Connection retries=45 2024-11-22T18:53:31,881 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T18:53:31,881 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T18:53:31,881 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T18:53:31,881 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T18:53:31,881 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T18:53:31,881 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-22T18:53:31,882 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T18:53:31,882 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42005 2024-11-22T18:53:31,884 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42005 connecting to ZooKeeper ensemble=127.0.0.1:55954 2024-11-22T18:53:31,889 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:420050x0, quorum=127.0.0.1:55954, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T18:53:31,891 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42005-0x1014106cde60000 connected 2024-11-22T18:53:31,910 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T18:53:31,911 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T18:53:31,914 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42005-0x1014106cde60000, quorum=127.0.0.1:55954, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T18:53:31,914 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993, hbase.cluster.distributed=false 2024-11-22T18:53:31,915 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42005-0x1014106cde60000, quorum=127.0.0.1:55954, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T18:53:31,916 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42005 2024-11-22T18:53:31,916 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42005 2024-11-22T18:53:31,916 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42005 2024-11-22T18:53:31,917 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42005 2024-11-22T18:53:31,917 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42005 2024-11-22T18:53:31,935 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/d79ba0c344fb:0 server-side Connection retries=45 2024-11-22T18:53:31,935 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T18:53:31,935 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T18:53:31,935 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T18:53:31,935 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T18:53:31,935 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T18:53:31,935 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-22T18:53:31,935 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T18:53:31,936 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39949 2024-11-22T18:53:31,937 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39949 connecting to ZooKeeper ensemble=127.0.0.1:55954 2024-11-22T18:53:31,938 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T18:53:31,940 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T18:53:31,945 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:399490x0, quorum=127.0.0.1:55954, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T18:53:31,945 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:399490x0, quorum=127.0.0.1:55954, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T18:53:31,945 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39949-0x1014106cde60001 connected 2024-11-22T18:53:31,945 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-22T18:53:31,946 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-22T18:53:31,947 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39949-0x1014106cde60001, quorum=127.0.0.1:55954, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-22T18:53:31,948 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39949-0x1014106cde60001, quorum=127.0.0.1:55954, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T18:53:31,948 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39949 2024-11-22T18:53:31,948 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39949 2024-11-22T18:53:31,949 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39949 2024-11-22T18:53:31,949 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39949 2024-11-22T18:53:31,949 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39949 2024-11-22T18:53:31,963 
DEBUG [M:0;d79ba0c344fb:42005 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;d79ba0c344fb:42005 2024-11-22T18:53:31,964 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/d79ba0c344fb,42005,1732301611881 2024-11-22T18:53:31,968 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42005-0x1014106cde60000, quorum=127.0.0.1:55954, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T18:53:31,968 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39949-0x1014106cde60001, quorum=127.0.0.1:55954, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T18:53:31,968 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42005-0x1014106cde60000, quorum=127.0.0.1:55954, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/d79ba0c344fb,42005,1732301611881 2024-11-22T18:53:31,971 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39949-0x1014106cde60001, quorum=127.0.0.1:55954, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-22T18:53:31,971 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42005-0x1014106cde60000, quorum=127.0.0.1:55954, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:53:31,971 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39949-0x1014106cde60001, quorum=127.0.0.1:55954, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:53:31,972 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42005-0x1014106cde60000, quorum=127.0.0.1:55954, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-22T18:53:31,972 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/d79ba0c344fb,42005,1732301611881 from backup master directory 2024-11-22T18:53:31,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39949-0x1014106cde60001, quorum=127.0.0.1:55954, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T18:53:31,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42005-0x1014106cde60000, quorum=127.0.0.1:55954, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/d79ba0c344fb,42005,1732301611881 2024-11-22T18:53:31,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42005-0x1014106cde60000, quorum=127.0.0.1:55954, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T18:53:31,974 WARN [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-22T18:53:31,974 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=d79ba0c344fb,42005,1732301611881 2024-11-22T18:53:31,979 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/hbase.id] with ID: f0ddcf57-b4e8-44be-b840-af23336eeb1f 2024-11-22T18:53:31,979 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/.tmp/hbase.id 2024-11-22T18:53:31,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741826_1002 (size=42) 2024-11-22T18:53:31,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741826_1002 (size=42) 2024-11-22T18:53:31,985 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/.tmp/hbase.id]:[hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/hbase.id] 2024-11-22T18:53:31,997 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T18:53:31,997 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-22T18:53:31,998 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-22T18:53:32,000 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42005-0x1014106cde60000, quorum=127.0.0.1:55954, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:53:32,000 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39949-0x1014106cde60001, quorum=127.0.0.1:55954, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:53:32,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741827_1003 (size=196) 2024-11-22T18:53:32,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741827_1003 (size=196) 2024-11-22T18:53:32,011 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T18:53:32,012 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-22T18:53:32,012 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T18:53:32,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741828_1004 (size=1189) 2024-11-22T18:53:32,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741828_1004 (size=1189) 2024-11-22T18:53:32,022 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/MasterData/data/master/store 2024-11-22T18:53:32,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741829_1005 (size=34) 2024-11-22T18:53:32,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741829_1005 (size=34) 2024-11-22T18:53:32,036 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T18:53:32,036 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T18:53:32,036 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T18:53:32,036 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T18:53:32,036 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T18:53:32,036 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T18:53:32,036 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-22T18:53:32,036 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732301612036Disabling compacts and flushes for region at 1732301612036Disabling writes for close at 1732301612036Writing region close event to WAL at 1732301612036Closed at 1732301612036 2024-11-22T18:53:32,037 WARN [master/d79ba0c344fb:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/MasterData/data/master/store/.initializing 2024-11-22T18:53:32,038 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/MasterData/WALs/d79ba0c344fb,42005,1732301611881 2024-11-22T18:53:32,041 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d79ba0c344fb%2C42005%2C1732301611881, suffix=, logDir=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/MasterData/WALs/d79ba0c344fb,42005,1732301611881, archiveDir=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/MasterData/oldWALs, maxLogs=10 2024-11-22T18:53:32,041 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor d79ba0c344fb%2C42005%2C1732301611881.1732301612041 2024-11-22T18:53:32,047 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/MasterData/WALs/d79ba0c344fb,42005,1732301611881/d79ba0c344fb%2C42005%2C1732301611881.1732301612041 2024-11-22T18:53:32,053 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45179:45179),(127.0.0.1/127.0.0.1:33757:33757)] 2024-11-22T18:53:32,057 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-22T18:53:32,057 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T18:53:32,057 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:53:32,057 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:53:32,059 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:53:32,060 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-22T18:53:32,060 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:53:32,061 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:53:32,061 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:53:32,063 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-22T18:53:32,063 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:53:32,063 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T18:53:32,063 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:53:32,065 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-22T18:53:32,065 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:53:32,065 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T18:53:32,065 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:53:32,066 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-22T18:53:32,067 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:53:32,067 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T18:53:32,067 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:53:32,068 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:53:32,069 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:53:32,071 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:53:32,071 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:53:32,071 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-22T18:53:32,073 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:53:32,075 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T18:53:32,076 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=806774, jitterRate=0.025867387652397156}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-22T18:53:32,077 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732301612057Initializing all the Stores at 1732301612058 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732301612058Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732301612058Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732301612058Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732301612058Cleaning up temporary data from old regions at 1732301612071 (+13 ms)Region opened successfully at 1732301612076 (+5 ms) 2024-11-22T18:53:32,077 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-22T18:53:32,080 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@65d14fc6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d79ba0c344fb/172.17.0.2:0 2024-11-22T18:53:32,081 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-22T18:53:32,081 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-22T18:53:32,081 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-22T18:53:32,081 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-22T18:53:32,082 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-22T18:53:32,082 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-22T18:53:32,082 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-22T18:53:32,084 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-22T18:53:32,085 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42005-0x1014106cde60000, quorum=127.0.0.1:55954, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-22T18:53:32,088 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-22T18:53:32,089 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-22T18:53:32,089 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42005-0x1014106cde60000, quorum=127.0.0.1:55954, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-22T18:53:32,091 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-22T18:53:32,091 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-22T18:53:32,093 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42005-0x1014106cde60000, quorum=127.0.0.1:55954, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-22T18:53:32,094 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-22T18:53:32,095 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42005-0x1014106cde60000, quorum=127.0.0.1:55954, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-22T18:53:32,096 DEBUG 
[master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-22T18:53:32,099 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42005-0x1014106cde60000, quorum=127.0.0.1:55954, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-22T18:53:32,102 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-22T18:53:32,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42005-0x1014106cde60000, quorum=127.0.0.1:55954, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T18:53:32,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39949-0x1014106cde60001, quorum=127.0.0.1:55954, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T18:53:32,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42005-0x1014106cde60000, quorum=127.0.0.1:55954, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:53:32,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39949-0x1014106cde60001, quorum=127.0.0.1:55954, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:53:32,104 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=d79ba0c344fb,42005,1732301611881, sessionid=0x1014106cde60000, setting cluster-up flag (Was=false) 2024-11-22T18:53:32,108 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39949-0x1014106cde60001, quorum=127.0.0.1:55954, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:53:32,108 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42005-0x1014106cde60000, quorum=127.0.0.1:55954, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:53:32,114 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-22T18:53:32,115 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=d79ba0c344fb,42005,1732301611881 2024-11-22T18:53:32,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39949-0x1014106cde60001, quorum=127.0.0.1:55954, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:53:32,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42005-0x1014106cde60000, quorum=127.0.0.1:55954, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:53:32,126 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-22T18:53:32,127 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=d79ba0c344fb,42005,1732301611881 2024-11-22T18:53:32,128 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-22T18:53:32,130 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-22T18:53:32,131 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-22T18:53:32,131 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-22T18:53:32,131 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: d79ba0c344fb,42005,1732301611881 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-22T18:53:32,137 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/d79ba0c344fb:0, corePoolSize=5, maxPoolSize=5 2024-11-22T18:53:32,137 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/d79ba0c344fb:0, corePoolSize=5, maxPoolSize=5 2024-11-22T18:53:32,137 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/d79ba0c344fb:0, corePoolSize=5, maxPoolSize=5 2024-11-22T18:53:32,137 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/d79ba0c344fb:0, corePoolSize=5, maxPoolSize=5 2024-11-22T18:53:32,137 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/d79ba0c344fb:0, corePoolSize=10, maxPoolSize=10 2024-11-22T18:53:32,137 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:53:32,137 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/d79ba0c344fb:0, corePoolSize=2, maxPoolSize=2 2024-11-22T18:53:32,137 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/d79ba0c344fb:0, corePoolSize=1, 
maxPoolSize=1 2024-11-22T18:53:32,140 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732301642139 2024-11-22T18:53:32,140 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-22T18:53:32,140 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-22T18:53:32,140 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-22T18:53:32,140 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-22T18:53:32,140 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-22T18:53:32,140 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-22T18:53:32,140 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T18:53:32,141 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-22T18:53:32,141 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-22T18:53:32,141 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-22T18:53:32,141 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T18:53:32,141 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-22T18:53:32,141 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-22T18:53:32,141 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-22T18:53:32,142 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:53:32,143 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-22T18:53:32,144 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/d79ba0c344fb:0:becomeActiveMaster-HFileCleaner.large.0-1732301612141,5,FailOnTimeoutGroup] 2024-11-22T18:53:32,145 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/d79ba0c344fb:0:becomeActiveMaster-HFileCleaner.small.0-1732301612144,5,FailOnTimeoutGroup] 2024-11-22T18:53:32,145 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T18:53:32,145 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-22T18:53:32,145 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-22T18:53:32,145 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
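The hbase:meta descriptor printed above (VERSIONS => '3', IN_MEMORY => 'true', BLOOMFILTER => 'ROWCOL', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', BLOCKSIZE => '8192 B (8KB)') can be reproduced with the standard client-side descriptor builders. A minimal sketch for one equivalent column family, assuming the HBase 2.x+ client API; the class name and 'demo_table' are illustrative, not from this log:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeDescriptorSketch {
  public static void main(String[] args) {
    // Mirror the 'info' family attributes from the descriptor logged above.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)                                     // VERSIONS => '3'
        .setInMemory(true)                                     // IN_MEMORY => 'true'
        .setBlocksize(8192)                                    // BLOCKSIZE => '8192 B (8KB)'
        .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING
        .build();

    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo_table"))           // hypothetical table name
        .setColumnFamily(info)
        .build();
    System.out.println(td);
  }
}

A descriptor like this is what FSTableDescriptors serializes to the .tableinfo file referenced a few lines further down.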
2024-11-22T18:53:32,151 INFO [RS:0;d79ba0c344fb:39949 {}] regionserver.HRegionServer(746): ClusterId : f0ddcf57-b4e8-44be-b840-af23336eeb1f 2024-11-22T18:53:32,151 DEBUG [RS:0;d79ba0c344fb:39949 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-22T18:53:32,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741831_1007 (size=1321) 2024-11-22T18:53:32,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741831_1007 (size=1321) 2024-11-22T18:53:32,155 DEBUG [RS:0;d79ba0c344fb:39949 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-22T18:53:32,155 DEBUG [RS:0;d79ba0c344fb:39949 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-22T18:53:32,156 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-22T18:53:32,156 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993 2024-11-22T18:53:32,163 DEBUG [RS:0;d79ba0c344fb:39949 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-22T18:53:32,163 DEBUG [RS:0;d79ba0c344fb:39949 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6233df0f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d79ba0c344fb/172.17.0.2:0 2024-11-22T18:53:32,176 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741832_1008 (size=32) 2024-11-22T18:53:32,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741832_1008 (size=32) 2024-11-22T18:53:32,177 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T18:53:32,178 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T18:53:32,180 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T18:53:32,180 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:53:32,180 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:53:32,181 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T18:53:32,182 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T18:53:32,182 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:53:32,182 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:53:32,183 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T18:53:32,184 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T18:53:32,184 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:53:32,184 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:53:32,184 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T18:53:32,185 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T18:53:32,185 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:53:32,186 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:53:32,186 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T18:53:32,186 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/hbase/meta/1588230740 2024-11-22T18:53:32,187 
DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/hbase/meta/1588230740 2024-11-22T18:53:32,187 DEBUG [RS:0;d79ba0c344fb:39949 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;d79ba0c344fb:39949 2024-11-22T18:53:32,187 INFO [RS:0;d79ba0c344fb:39949 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-22T18:53:32,187 INFO [RS:0;d79ba0c344fb:39949 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-22T18:53:32,187 DEBUG [RS:0;d79ba0c344fb:39949 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-22T18:53:32,188 INFO [RS:0;d79ba0c344fb:39949 {}] regionserver.HRegionServer(2659): reportForDuty to master=d79ba0c344fb,42005,1732301611881 with port=39949, startcode=1732301611934 2024-11-22T18:53:32,188 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T18:53:32,188 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T18:53:32,188 DEBUG [RS:0;d79ba0c344fb:39949 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-22T18:53:32,189 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T18:53:32,190 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T18:53:32,192 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43487, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-22T18:53:32,192 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42005 {}] master.ServerManager(363): Checking decommissioned status of RegionServer d79ba0c344fb,39949,1732301611934 2024-11-22T18:53:32,192 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42005 {}] master.ServerManager(517): Registering regionserver=d79ba0c344fb,39949,1732301611934 2024-11-22T18:53:32,193 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T18:53:32,193 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=852009, jitterRate=0.08338634669780731}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T18:53:32,194 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732301612177Initializing all the Stores at 1732301612178 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732301612178Instantiating store for column family {NAME => 'ns', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732301612178Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732301612178Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732301612178Cleaning up temporary data from old regions at 1732301612188 (+10 ms)Region opened successfully at 1732301612194 (+6 ms) 2024-11-22T18:53:32,194 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T18:53:32,194 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T18:53:32,194 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T18:53:32,194 DEBUG [RS:0;d79ba0c344fb:39949 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993 2024-11-22T18:53:32,194 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T18:53:32,194 DEBUG [RS:0;d79ba0c344fb:39949 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39409 2024-11-22T18:53:32,194 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T18:53:32,194 DEBUG [RS:0;d79ba0c344fb:39949 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-22T18:53:32,196 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42005-0x1014106cde60000, quorum=127.0.0.1:55954, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T18:53:32,197 DEBUG [RS:0;d79ba0c344fb:39949 {}] zookeeper.ZKUtil(111): regionserver:39949-0x1014106cde60001, quorum=127.0.0.1:55954, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/d79ba0c344fb,39949,1732301611934 2024-11-22T18:53:32,197 WARN [RS:0;d79ba0c344fb:39949 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
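The NodeChildrenChanged event on /hbase/rs and the watcher set on /hbase/rs/d79ba0c344fb,39949,1732301611934 above are ordinary ZooKeeper child watches on ephemeral region-server znodes. A rough sketch of the same pattern with the stock ZooKeeper client, using the quorum address from the log; the class name is invented and connection handling is simplified:

import java.util.List;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RsZnodeWatchSketch {
  public static void main(String[] args) throws Exception {
    // Quorum taken from the log; the default watcher here just ignores session events.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:55954", 30000, event -> { });

    Watcher childWatch = (WatchedEvent event) ->
        System.out.println("got " + event.getType() + " on " + event.getPath());

    // Roughly what the master-side tracking relies on: list /hbase/rs and leave a child
    // watch, so a NodeChildrenChanged event fires when a region server (de)registers.
    List<String> servers = zk.getChildren("/hbase/rs", childWatch);
    System.out.println("live region servers: " + servers);

    zk.close();
  }
}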
2024-11-22T18:53:32,197 INFO [RS:0;d79ba0c344fb:39949 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T18:53:32,197 DEBUG [RS:0;d79ba0c344fb:39949 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/WALs/d79ba0c344fb,39949,1732301611934 2024-11-22T18:53:32,198 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T18:53:32,198 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732301612194Disabling compacts and flushes for region at 1732301612194Disabling writes for close at 1732301612194Writing region close event to WAL at 1732301612198 (+4 ms)Closed at 1732301612198 2024-11-22T18:53:32,199 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [d79ba0c344fb,39949,1732301611934] 2024-11-22T18:53:32,200 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T18:53:32,200 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-22T18:53:32,200 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-22T18:53:32,201 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T18:53:32,202 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-22T18:53:32,213 INFO [RS:0;d79ba0c344fb:39949 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-22T18:53:32,216 INFO [RS:0;d79ba0c344fb:39949 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-22T18:53:32,217 INFO [RS:0;d79ba0c344fb:39949 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-22T18:53:32,217 INFO [RS:0;d79ba0c344fb:39949 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T18:53:32,217 INFO [RS:0;d79ba0c344fb:39949 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-22T18:53:32,218 INFO [RS:0;d79ba0c344fb:39949 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-22T18:53:32,218 INFO [RS:0;d79ba0c344fb:39949 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
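Each "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled" line above is ChoreService scheduling a ScheduledChore. A minimal sketch of that mechanism, assuming the public ScheduledChore/ChoreService constructors behave as documented; the chore name, period, and body are invented:

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    // A trivial Stoppable so the chore has something to check for shutdown.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped = false;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };

    ChoreService service = new ChoreService("chore-sketch");
    ScheduledChore tick = new ScheduledChore("DemoChore", stopper, 1, 0, TimeUnit.SECONDS) {
      @Override protected void chore() {
        // Runs once per period, like CompactionChecker or CompactedHFilesCleaner above.
        System.out.println("chore tick");
      }
    };
    service.scheduleChore(tick);  // source of the "... is enabled" lines seen in this log

    Thread.sleep(3000);
    stopper.stop("done");
    service.shutdown();
  }
}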
2024-11-22T18:53:32,218 DEBUG [RS:0;d79ba0c344fb:39949 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:53:32,218 DEBUG [RS:0;d79ba0c344fb:39949 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:53:32,218 DEBUG [RS:0;d79ba0c344fb:39949 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:53:32,219 DEBUG [RS:0;d79ba0c344fb:39949 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:53:32,219 DEBUG [RS:0;d79ba0c344fb:39949 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:53:32,219 DEBUG [RS:0;d79ba0c344fb:39949 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/d79ba0c344fb:0, corePoolSize=2, maxPoolSize=2 2024-11-22T18:53:32,219 DEBUG [RS:0;d79ba0c344fb:39949 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:53:32,219 DEBUG [RS:0;d79ba0c344fb:39949 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:53:32,219 DEBUG [RS:0;d79ba0c344fb:39949 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:53:32,219 DEBUG [RS:0;d79ba0c344fb:39949 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:53:32,219 DEBUG [RS:0;d79ba0c344fb:39949 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:53:32,219 DEBUG [RS:0;d79ba0c344fb:39949 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:53:32,219 DEBUG [RS:0;d79ba0c344fb:39949 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/d79ba0c344fb:0, corePoolSize=3, maxPoolSize=3 2024-11-22T18:53:32,219 DEBUG [RS:0;d79ba0c344fb:39949 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/d79ba0c344fb:0, corePoolSize=3, maxPoolSize=3 2024-11-22T18:53:32,220 INFO [RS:0;d79ba0c344fb:39949 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T18:53:32,221 INFO [RS:0;d79ba0c344fb:39949 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T18:53:32,221 INFO [RS:0;d79ba0c344fb:39949 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T18:53:32,221 INFO [RS:0;d79ba0c344fb:39949 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
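The corePoolSize/maxPoolSize pairs in the executor lines above describe ordinary Java thread-pool sizing: the pool keeps corePoolSize threads alive and never grows beyond maxPoolSize. A small standard-library sketch of the same sizing (this uses java.util.concurrent directly, not HBase's own ExecutorService wrapper):

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class ExecutorSizingSketch {
  public static void main(String[] args) {
    // corePoolSize=1, maxPoolSize=1, as in the RS_OPEN_REGION line above:
    // a single handler thread, with extra work queued rather than spawning threads.
    ThreadPoolExecutor pool = new ThreadPoolExecutor(
        1, 1, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
    pool.submit(() -> System.out.println("handled on " + Thread.currentThread().getName()));
    pool.shutdown();
  }
}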
2024-11-22T18:53:32,221 INFO [RS:0;d79ba0c344fb:39949 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-22T18:53:32,221 INFO [RS:0;d79ba0c344fb:39949 {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,39949,1732301611934-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T18:53:32,239 INFO [RS:0;d79ba0c344fb:39949 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-22T18:53:32,239 INFO [RS:0;d79ba0c344fb:39949 {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,39949,1732301611934-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T18:53:32,239 INFO [RS:0;d79ba0c344fb:39949 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T18:53:32,239 INFO [RS:0;d79ba0c344fb:39949 {}] regionserver.Replication(171): d79ba0c344fb,39949,1732301611934 started 2024-11-22T18:53:32,255 INFO [RS:0;d79ba0c344fb:39949 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T18:53:32,255 INFO [RS:0;d79ba0c344fb:39949 {}] regionserver.HRegionServer(1482): Serving as d79ba0c344fb,39949,1732301611934, RpcServer on d79ba0c344fb/172.17.0.2:39949, sessionid=0x1014106cde60001 2024-11-22T18:53:32,255 DEBUG [RS:0;d79ba0c344fb:39949 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-22T18:53:32,255 DEBUG [RS:0;d79ba0c344fb:39949 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager d79ba0c344fb,39949,1732301611934 2024-11-22T18:53:32,255 DEBUG [RS:0;d79ba0c344fb:39949 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd79ba0c344fb,39949,1732301611934' 2024-11-22T18:53:32,255 DEBUG [RS:0;d79ba0c344fb:39949 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-22T18:53:32,256 DEBUG [RS:0;d79ba0c344fb:39949 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-22T18:53:32,256 DEBUG [RS:0;d79ba0c344fb:39949 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-22T18:53:32,256 DEBUG [RS:0;d79ba0c344fb:39949 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-22T18:53:32,257 DEBUG [RS:0;d79ba0c344fb:39949 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager d79ba0c344fb,39949,1732301611934 2024-11-22T18:53:32,257 DEBUG [RS:0;d79ba0c344fb:39949 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd79ba0c344fb,39949,1732301611934' 2024-11-22T18:53:32,257 DEBUG [RS:0;d79ba0c344fb:39949 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-22T18:53:32,257 DEBUG [RS:0;d79ba0c344fb:39949 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-22T18:53:32,257 DEBUG [RS:0;d79ba0c344fb:39949 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-22T18:53:32,257 INFO [RS:0;d79ba0c344fb:39949 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-22T18:53:32,257 INFO [RS:0;d79ba0c344fb:39949 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-22T18:53:32,352 WARN [d79ba0c344fb:42005 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-22T18:53:32,360 INFO [RS:0;d79ba0c344fb:39949 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d79ba0c344fb%2C39949%2C1732301611934, suffix=, logDir=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/WALs/d79ba0c344fb,39949,1732301611934, archiveDir=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/oldWALs, maxLogs=32 2024-11-22T18:53:32,360 INFO [RS:0;d79ba0c344fb:39949 {}] monitor.StreamSlowMonitor(122): New stream slow monitor d79ba0c344fb%2C39949%2C1732301611934.1732301612360 2024-11-22T18:53:32,366 INFO [RS:0;d79ba0c344fb:39949 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/WALs/d79ba0c344fb,39949,1732301611934/d79ba0c344fb%2C39949%2C1732301611934.1732301612360 2024-11-22T18:53:32,367 DEBUG [RS:0;d79ba0c344fb:39949 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45179:45179),(127.0.0.1/127.0.0.1:33757:33757)] 2024-11-22T18:53:32,407 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T18:53:32,407 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T18:53:32,603 DEBUG [d79ba0c344fb:42005 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-22T18:53:32,603 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=d79ba0c344fb,39949,1732301611934 2024-11-22T18:53:32,605 INFO [PEWorker-2 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as d79ba0c344fb,39949,1732301611934, state=OPENING 2024-11-22T18:53:32,607 DEBUG [PEWorker-2 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-22T18:53:32,609 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42005-0x1014106cde60000, quorum=127.0.0.1:55954, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:53:32,609 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39949-0x1014106cde60001, quorum=127.0.0.1:55954, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:53:32,610 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T18:53:32,610 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T18:53:32,610 DEBUG [PEWorker-2 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T18:53:32,610 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=d79ba0c344fb,39949,1732301611934}] 2024-11-22T18:53:32,764 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-22T18:53:32,766 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59049, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-22T18:53:32,783 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-22T18:53:32,783 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T18:53:32,785 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d79ba0c344fb%2C39949%2C1732301611934.meta, suffix=.meta, logDir=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/WALs/d79ba0c344fb,39949,1732301611934, archiveDir=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/oldWALs, maxLogs=32 2024-11-22T18:53:32,786 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor d79ba0c344fb%2C39949%2C1732301611934.meta.1732301612786.meta 2024-11-22T18:53:32,793 INFO 
[RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/WALs/d79ba0c344fb,39949,1732301611934/d79ba0c344fb%2C39949%2C1732301611934.meta.1732301612786.meta 2024-11-22T18:53:32,804 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45179:45179),(127.0.0.1/127.0.0.1:33757:33757)] 2024-11-22T18:53:32,808 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-22T18:53:32,809 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-22T18:53:32,809 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-22T18:53:32,809 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-22T18:53:32,809 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-22T18:53:32,809 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T18:53:32,809 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-22T18:53:32,809 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-22T18:53:32,811 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T18:53:32,812 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T18:53:32,812 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:53:32,812 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:53:32,812 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T18:53:32,813 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T18:53:32,813 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:53:32,814 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:53:32,814 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T18:53:32,815 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T18:53:32,815 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:53:32,815 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:53:32,815 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T18:53:32,816 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T18:53:32,816 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:53:32,816 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:53:32,817 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T18:53:32,817 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/hbase/meta/1588230740 2024-11-22T18:53:32,818 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/hbase/meta/1588230740 2024-11-22T18:53:32,820 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T18:53:32,820 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T18:53:32,820 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
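The CompactionConfiguration lines above (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.200000) are driven by configuration properties rather than code. A sketch of setting them programmatically; the values mirror the log, but the hbase.hstore.compaction.* key names are quoted from memory, not from this log, so treat them as assumptions:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);        // minFilesToCompact in the log
    conf.setInt("hbase.hstore.compaction.max", 10);       // maxFilesToCompact in the log
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // ratio 1.200000 in the log
    System.out.println("ratio = " + conf.getFloat("hbase.hstore.compaction.ratio", -1f));
  }
}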
2024-11-22T18:53:32,822 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T18:53:32,823 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=786975, jitterRate=6.911605596542358E-4}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T18:53:32,823 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-22T18:53:32,824 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732301612809Writing region info on filesystem at 1732301612809Initializing all the Stores at 1732301612810 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732301612810Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732301612811 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732301612811Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732301612811Cleaning up temporary data from old regions at 1732301612820 (+9 ms)Running coprocessor post-open hooks at 1732301612823 (+3 ms)Region opened successfully at 1732301612824 (+1 ms) 2024-11-22T18:53:32,825 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732301612764 2024-11-22T18:53:32,828 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-22T18:53:32,829 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-22T18:53:32,829 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=d79ba0c344fb,39949,1732301611934 2024-11-22T18:53:32,830 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as d79ba0c344fb,39949,1732301611934, state=OPEN 2024-11-22T18:53:32,837 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42005-0x1014106cde60000, quorum=127.0.0.1:55954, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T18:53:32,837 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39949-0x1014106cde60001, quorum=127.0.0.1:55954, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T18:53:32,837 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=d79ba0c344fb,39949,1732301611934 2024-11-22T18:53:32,837 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T18:53:32,838 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T18:53:32,841 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-22T18:53:32,841 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=d79ba0c344fb,39949,1732301611934 in 227 msec 2024-11-22T18:53:32,844 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-22T18:53:32,844 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 641 msec 2024-11-22T18:53:32,845 DEBUG [PEWorker-3 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T18:53:32,845 INFO [PEWorker-3 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-22T18:53:32,846 DEBUG [PEWorker-3 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T18:53:32,847 DEBUG [PEWorker-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=d79ba0c344fb,39949,1732301611934, seqNum=-1] 2024-11-22T18:53:32,847 DEBUG [PEWorker-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T18:53:32,848 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45119, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T18:53:32,855 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 724 msec 2024-11-22T18:53:32,855 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732301612855, completionTime=-1 2024-11-22T18:53:32,855 INFO 
[master/d79ba0c344fb:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-22T18:53:32,855 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-22T18:53:32,857 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-22T18:53:32,858 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732301672858 2024-11-22T18:53:32,858 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732301732858 2024-11-22T18:53:32,858 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-22T18:53:32,858 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,42005,1732301611881-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T18:53:32,858 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,42005,1732301611881-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T18:53:32,858 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,42005,1732301611881-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T18:53:32,858 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-d79ba0c344fb:42005, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T18:53:32,858 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-22T18:53:32,859 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-22T18:53:32,861 DEBUG [master/d79ba0c344fb:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-22T18:53:32,863 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.889sec 2024-11-22T18:53:32,863 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-22T18:53:32,863 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-22T18:53:32,863 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-22T18:53:32,863 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
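"The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=d79ba0c344fb,39949,...]" above is what a client sees when it resolves hbase:meta. A minimal client-side sketch with the standard Connection/RegionLocator API; the quorum values are taken from the log, everything else is illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLocationSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");           // quorum host from the log
    conf.set("hbase.zookeeper.property.clientPort", "55954");  // quorum port from the log

    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
      // Client-side counterpart of the "fetched meta region location" lines above.
      HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes(""));
      System.out.println(loc.getRegion().getRegionNameAsString()
          + " on " + loc.getServerName());
    }
  }
}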
2024-11-22T18:53:32,863 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-22T18:53:32,863 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,42005,1732301611881-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T18:53:32,863 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,42005,1732301611881-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-22T18:53:32,866 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-22T18:53:32,866 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-22T18:53:32,866 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,42005,1732301611881-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T18:53:32,952 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49490ce4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T18:53:32,952 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request d79ba0c344fb,42005,-1 for getting cluster id 2024-11-22T18:53:32,952 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T18:53:32,954 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f0ddcf57-b4e8-44be-b840-af23336eeb1f' 2024-11-22T18:53:32,954 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T18:53:32,954 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f0ddcf57-b4e8-44be-b840-af23336eeb1f" 2024-11-22T18:53:32,955 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e2b4dd4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T18:53:32,955 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [d79ba0c344fb,42005,-1] 2024-11-22T18:53:32,955 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T18:53:32,955 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T18:53:32,957 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49258, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T18:53:32,958 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@70aed17c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T18:53:32,958 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T18:53:32,960 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=d79ba0c344fb,39949,1732301611934, seqNum=-1] 2024-11-22T18:53:32,960 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T18:53:32,961 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38364, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T18:53:32,963 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=d79ba0c344fb,42005,1732301611881 2024-11-22T18:53:32,963 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T18:53:32,966 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-22T18:53:32,966 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-22T18:53:32,968 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is d79ba0c344fb,42005,1732301611881 2024-11-22T18:53:32,968 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@69624f08 2024-11-22T18:53:32,968 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-22T18:53:32,969 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49266, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-22T18:53:32,970 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42005 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-22T18:53:32,970 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42005 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
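At this point the test harness takes over: HBaseTestingUtil reports the minicluster up, the balancer is switched off, and TableDescriptorChecker warns that hbase.hregion.max.filesize (786432) and hbase.hregion.memstore.flush.size (8192) are far below sane production values, which is intentional for a log-rolling test. A hedged sketch of the test-side calls that would produce this sequence, assuming HBaseTestingUtil keeps the familiar HBaseTestingUtility-style method names (the actual test source is not part of this log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.client.Admin;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        Configuration conf = util.getConfiguration();

        // Deliberately tiny region size and flush size -- the exact values the
        // TableDescriptorChecker warnings above complain about -- so the test can
        // trigger flushes, log rolls and splits with very little data.
        conf.setLong("hbase.hregion.max.filesize", 786432L);
        conf.setLong("hbase.hregion.memstore.flush.size", 8192L);

        util.startMiniCluster(1);              // "Minicluster is up; activeMaster=..."
        Admin admin = util.getAdmin();
        admin.balancerSwitch(false, false);    // "Client=... set balanceSwitch=false"
        try {
          // ... test body ...
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }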
2024-11-22T18:53:32,970 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42005 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T18:53:32,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42005 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-22T18:53:32,973 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-22T18:53:32,973 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:53:32,973 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42005 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-22T18:53:32,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42005 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-22T18:53:32,975 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-22T18:53:32,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741835_1011 (size=381) 2024-11-22T18:53:32,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741835_1011 (size=381) 2024-11-22T18:53:32,985 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => c4f64eb1dac983aa8e376c1d005981a8, NAME => 'TestLogRolling-testLogRolling,,1732301612970.c4f64eb1dac983aa8e376c1d005981a8.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993 2024-11-22T18:53:32,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741836_1012 (size=64) 2024-11-22T18:53:32,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741836_1012 (size=64) 2024-11-22T18:53:32,992 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated 
TestLogRolling-testLogRolling,,1732301612970.c4f64eb1dac983aa8e376c1d005981a8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T18:53:32,992 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing c4f64eb1dac983aa8e376c1d005981a8, disabling compactions & flushes 2024-11-22T18:53:32,992 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732301612970.c4f64eb1dac983aa8e376c1d005981a8. 2024-11-22T18:53:32,992 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732301612970.c4f64eb1dac983aa8e376c1d005981a8. 2024-11-22T18:53:32,992 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732301612970.c4f64eb1dac983aa8e376c1d005981a8. after waiting 0 ms 2024-11-22T18:53:32,992 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732301612970.c4f64eb1dac983aa8e376c1d005981a8. 2024-11-22T18:53:32,992 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732301612970.c4f64eb1dac983aa8e376c1d005981a8. 2024-11-22T18:53:32,992 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for c4f64eb1dac983aa8e376c1d005981a8: Waiting for close lock at 1732301612992Disabling compacts and flushes for region at 1732301612992Disabling writes for close at 1732301612992Writing region close event to WAL at 1732301612992Closed at 1732301612992 2024-11-22T18:53:32,994 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-22T18:53:32,994 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1732301612970.c4f64eb1dac983aa8e376c1d005981a8.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1732301612994"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732301612994"}]},"ts":"1732301612994"} 2024-11-22T18:53:32,997 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
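The block above is the server side of a single Admin.createTable call: the master stores CreateTableProcedure pid=4, writes the region directory for c4f64eb1dac983aa8e376c1d005981a8 to HDFS, and adds the region to hbase:meta. A minimal client-side sketch that would issue such a request; the table name and the single 'info' family with VERSIONS => 1 are taken from the create line above, everything else is left at defaults:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName name = TableName.valueOf("TestLogRolling-testLogRolling");
          // Single 'info' family keeping one version, as in the logged create request.
          admin.createTable(TableDescriptorBuilder.newBuilder(name)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                  .setMaxVersions(1)
                  .build())
              .build());
        }
      }
    }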
2024-11-22T18:53:32,998 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-22T18:53:32,998 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732301612998"}]},"ts":"1732301612998"} 2024-11-22T18:53:33,000 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-22T18:53:33,001 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c4f64eb1dac983aa8e376c1d005981a8, ASSIGN}] 2024-11-22T18:53:33,002 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c4f64eb1dac983aa8e376c1d005981a8, ASSIGN 2024-11-22T18:53:33,003 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c4f64eb1dac983aa8e376c1d005981a8, ASSIGN; state=OFFLINE, location=d79ba0c344fb,39949,1732301611934; forceNewPlan=false, retain=false 2024-11-22T18:53:33,154 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=c4f64eb1dac983aa8e376c1d005981a8, regionState=OPENING, regionLocation=d79ba0c344fb,39949,1732301611934 2024-11-22T18:53:33,157 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c4f64eb1dac983aa8e376c1d005981a8, ASSIGN because future has completed 2024-11-22T18:53:33,157 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure c4f64eb1dac983aa8e376c1d005981a8, server=d79ba0c344fb,39949,1732301611934}] 2024-11-22T18:53:33,315 INFO [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1732301612970.c4f64eb1dac983aa8e376c1d005981a8. 
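The assignment chain above (TransitRegionStateProcedure pid=5 -> OpenRegionProcedure pid=6 -> AssignRegionHandler) ends with the new region opening on d79ba0c344fb,39949. From a client, the resulting placement can be checked with a RegionLocator, sketched below using the public client API (connection boilerplate assumed, as in the earlier sketches):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class LocateRegionSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator =
                 conn.getRegionLocator(TableName.valueOf("TestLogRolling-testLogRolling"))) {
          // The table has one region spanning the whole key space (STARTKEY='' / ENDKEY=''),
          // so any row key resolves to it; 'true' forces a fresh lookup in hbase:meta.
          HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes(""), true);
          System.out.println(loc.getServerName() + " hosts " + loc.getRegion().getRegionNameAsString());
        }
      }
    }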
2024-11-22T18:53:33,315 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => c4f64eb1dac983aa8e376c1d005981a8, NAME => 'TestLogRolling-testLogRolling,,1732301612970.c4f64eb1dac983aa8e376c1d005981a8.', STARTKEY => '', ENDKEY => ''} 2024-11-22T18:53:33,316 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling c4f64eb1dac983aa8e376c1d005981a8 2024-11-22T18:53:33,316 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732301612970.c4f64eb1dac983aa8e376c1d005981a8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T18:53:33,316 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for c4f64eb1dac983aa8e376c1d005981a8 2024-11-22T18:53:33,316 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for c4f64eb1dac983aa8e376c1d005981a8 2024-11-22T18:53:33,317 INFO [StoreOpener-c4f64eb1dac983aa8e376c1d005981a8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region c4f64eb1dac983aa8e376c1d005981a8 2024-11-22T18:53:33,319 INFO [StoreOpener-c4f64eb1dac983aa8e376c1d005981a8-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c4f64eb1dac983aa8e376c1d005981a8 columnFamilyName info 2024-11-22T18:53:33,319 DEBUG [StoreOpener-c4f64eb1dac983aa8e376c1d005981a8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:53:33,320 INFO [StoreOpener-c4f64eb1dac983aa8e376c1d005981a8-1 {}] regionserver.HStore(327): Store=c4f64eb1dac983aa8e376c1d005981a8/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T18:53:33,320 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for c4f64eb1dac983aa8e376c1d005981a8 2024-11-22T18:53:33,320 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8 2024-11-22T18:53:33,321 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8 2024-11-22T18:53:33,321 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for c4f64eb1dac983aa8e376c1d005981a8 2024-11-22T18:53:33,321 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for c4f64eb1dac983aa8e376c1d005981a8 2024-11-22T18:53:33,323 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for c4f64eb1dac983aa8e376c1d005981a8 2024-11-22T18:53:33,325 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T18:53:33,326 INFO [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened c4f64eb1dac983aa8e376c1d005981a8; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=872553, jitterRate=0.10950906574726105}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T18:53:33,326 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c4f64eb1dac983aa8e376c1d005981a8 2024-11-22T18:53:33,327 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for c4f64eb1dac983aa8e376c1d005981a8: Running coprocessor pre-open hook at 1732301613316Writing region info on filesystem at 1732301613316Initializing all the Stores at 1732301613317 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732301613317Cleaning up temporary data from old regions at 1732301613321 (+4 ms)Running coprocessor post-open hooks at 1732301613326 (+5 ms)Region opened successfully at 1732301613327 (+1 ms) 2024-11-22T18:53:33,328 INFO [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1732301612970.c4f64eb1dac983aa8e376c1d005981a8., pid=6, masterSystemTime=1732301613310 2024-11-22T18:53:33,331 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1732301612970.c4f64eb1dac983aa8e376c1d005981a8. 
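With the region open and deployed, the remainder of this stretch is dominated by repeated Close-WAL-Writer warnings from RecoverLeaseFSUtils. They refer to WAL files under hdfs://localhost:44035/.../821b3491-..., apparently left over from an earlier minicluster in the same JVM, and every attempt fails with "Filesystem closed" because the DFSClient behind those paths has already been shut down; the utility keeps retrying, which is why the same stack trace recurs at roughly one-second intervals below. A simplified sketch of that recover-and-poll pattern against HDFS (not the actual RecoverLeaseFSUtils code; the DistributedFileSystem methods are real, the loop structure and timing are illustrative):

    import java.io.IOException;

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoverySketch {
      /**
       * Ask the NameNode to recover the lease on a WAL file, then poll until HDFS
       * reports the file closed, pausing roughly a second between attempts.
       */
      static boolean recoverWalLease(DistributedFileSystem dfs, Path wal)
          throws IOException, InterruptedException {
        boolean recovered = dfs.recoverLease(wal);   // kicks off lease recovery on the NameNode
        while (!recovered) {
          Thread.sleep(1000L);                       // ~1s cadence, matching the warning timestamps below
          try {
            recovered = dfs.isFileClosed(wal);       // the call that throws "Filesystem closed" in the traces below
          } catch (IOException e) {
            // RecoverLeaseFSUtils logs this as "Failed invocation for <path>" and keeps retrying.
          }
        }
        return recovered;
      }
    }

The real utility is more involved (overall timeouts, periodically re-invoking recoverLease); only the polling shape that produces the recurring warnings is shown here.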
2024-11-22T18:53:33,331 INFO [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1732301612970.c4f64eb1dac983aa8e376c1d005981a8. 2024-11-22T18:53:33,332 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=c4f64eb1dac983aa8e376c1d005981a8, regionState=OPEN, openSeqNum=2, regionLocation=d79ba0c344fb,39949,1732301611934 2024-11-22T18:53:33,335 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure c4f64eb1dac983aa8e376c1d005981a8, server=d79ba0c344fb,39949,1732301611934 because future has completed 2024-11-22T18:53:33,339 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-22T18:53:33,339 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure c4f64eb1dac983aa8e376c1d005981a8, server=d79ba0c344fb,39949,1732301611934 in 179 msec 2024-11-22T18:53:33,342 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-22T18:53:33,342 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c4f64eb1dac983aa8e376c1d005981a8, ASSIGN in 338 msec 2024-11-22T18:53:33,343 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-22T18:53:33,343 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732301613343"}]},"ts":"1732301613343"} 2024-11-22T18:53:33,345 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-22T18:53:33,346 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-22T18:53:33,349 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 377 msec 2024-11-22T18:53:33,408 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:33,408 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:34,408 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:34,408 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:35,409 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:35,409 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:35,426 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:35,426 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:35,426 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:35,427 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:35,427 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:35,427 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:35,427 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:35,427 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:35,440 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:35,440 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:35,441 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:35,441 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:35,441 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:35,441 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:35,445 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:35,446 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:35,446 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:35,448 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:35,953 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-22T18:53:35,955 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:35,955 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:35,955 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:35,955 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:35,955 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:35,956 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:35,957 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:35,957 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:35,979 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:35,979 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:35,979 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:35,980 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:35,980 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:35,981 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:35,986 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:35,987 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:35,987 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:35,991 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:36,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:36,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:37,411 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:37,411 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:38,213 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-22T18:53:38,213 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-22T18:53:38,411 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:38,411 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:39,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:39,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:39,873 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-22T18:53:39,873 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-22T18:53:39,874 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-22T18:53:40,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:40,413 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:41,413 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:41,413 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:42,414 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:42,414 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T18:53:43,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42005 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-22T18:53:43,038 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-11-22T18:53:43,038 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-11-22T18:53:43,041 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-11-22T18:53:43,041 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1732301612970.c4f64eb1dac983aa8e376c1d005981a8. 2024-11-22T18:53:43,043 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1732301612970.c4f64eb1dac983aa8e376c1d005981a8., hostname=d79ba0c344fb,39949,1732301611934, seqNum=2] 2024-11-22T18:53:43,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39949 {}] regionserver.HRegion(8855): Flush requested on c4f64eb1dac983aa8e376c1d005981a8 2024-11-22T18:53:43,057 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c4f64eb1dac983aa8e376c1d005981a8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T18:53:43,075 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/.tmp/info/d9d0970b80c6453c86f90dcfc7caed55 is 1080, key is row0001/info:/1732301623044/Put/seqid=0 2024-11-22T18:53:43,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741837_1013 (size=12509) 2024-11-22T18:53:43,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741837_1013 (size=12509) 2024-11-22T18:53:43,081 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/.tmp/info/d9d0970b80c6453c86f90dcfc7caed55 2024-11-22T18:53:43,088 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/.tmp/info/d9d0970b80c6453c86f90dcfc7caed55 as hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/info/d9d0970b80c6453c86f90dcfc7caed55 2024-11-22T18:53:43,097 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39949 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=c4f64eb1dac983aa8e376c1d005981a8, server=d79ba0c344fb,39949,1732301611934 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-22T18:53:43,098 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/info/d9d0970b80c6453c86f90dcfc7caed55, entries=7, sequenceid=11, filesize=12.2 K 2024-11-22T18:53:43,099 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for c4f64eb1dac983aa8e376c1d005981a8 in 42ms, sequenceid=11, compaction requested=false 2024-11-22T18:53:43,099 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c4f64eb1dac983aa8e376c1d005981a8: 2024-11-22T18:53:43,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39949 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:38364 deadline: 1732301633096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=c4f64eb1dac983aa8e376c1d005981a8, server=d79ba0c344fb,39949,1732301611934 2024-11-22T18:53:43,123 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1732301612970.c4f64eb1dac983aa8e376c1d005981a8., hostname=d79ba0c344fb,39949,1732301611934, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1732301612970.c4f64eb1dac983aa8e376c1d005981a8., hostname=d79ba0c344fb,39949,1732301611934, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=c4f64eb1dac983aa8e376c1d005981a8, server=d79ba0c344fb,39949,1732301611934 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T18:53:43,124 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1732301612970.c4f64eb1dac983aa8e376c1d005981a8., hostname=d79ba0c344fb,39949,1732301611934, seqNum=2 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=c4f64eb1dac983aa8e376c1d005981a8, server=d79ba0c344fb,39949,1732301611934 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T18:53:43,124 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1732301612970.c4f64eb1dac983aa8e376c1d005981a8., hostname=d79ba0c344fb,39949,1732301611934, seqNum=2 because the exception is null or not the one we care about 2024-11-22T18:53:43,414 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:43,414 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T18:53:44,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:44,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:45,376 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-22T18:53:45,377 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:45,377 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:45,377 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:45,378 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:45,378 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:45,378 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:45,379 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:45,379 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:45,396 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:45,396 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:45,397 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:45,397 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:45,397 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:45,397 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:45,401 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:45,401 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:45,401 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:45,404 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:53:45,416 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:45,416 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:46,416 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:46,416 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:47,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:47,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:48,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:48,418 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:49,418 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:49,418 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:50,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:50,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:51,420 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:51,420 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:52,420 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:52,420 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:53,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39949 {}] regionserver.HRegion(8855): Flush requested on c4f64eb1dac983aa8e376c1d005981a8 2024-11-22T18:53:53,209 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c4f64eb1dac983aa8e376c1d005981a8 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-11-22T18:53:53,217 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/.tmp/info/f4f5824ecf0f4cc4a9556e23f34116c9 is 1080, key is row0008/info:/1732301623058/Put/seqid=0 2024-11-22T18:53:53,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741838_1014 (size=29761) 2024-11-22T18:53:53,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741838_1014 (size=29761) 2024-11-22T18:53:53,224 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/.tmp/info/f4f5824ecf0f4cc4a9556e23f34116c9 2024-11-22T18:53:53,234 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/.tmp/info/f4f5824ecf0f4cc4a9556e23f34116c9 as hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/info/f4f5824ecf0f4cc4a9556e23f34116c9 2024-11-22T18:53:53,240 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/info/f4f5824ecf0f4cc4a9556e23f34116c9, entries=23, sequenceid=37, filesize=29.1 K 2024-11-22T18:53:53,241 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for c4f64eb1dac983aa8e376c1d005981a8 in 32ms, sequenceid=37, compaction requested=false 2024-11-22T18:53:53,242 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c4f64eb1dac983aa8e376c1d005981a8: 2024-11-22T18:53:53,242 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=41.3 K, sizeToCheck=16.0 K 2024-11-22T18:53:53,242 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T18:53:53,242 DEBUG 
[MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/info/f4f5824ecf0f4cc4a9556e23f34116c9 because midkey is the same as first or last row 2024-11-22T18:53:53,421 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:53,421 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:54,422 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:54,422 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T18:53:55,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39949 {}] regionserver.HRegion(8855): Flush requested on c4f64eb1dac983aa8e376c1d005981a8 2024-11-22T18:53:55,226 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c4f64eb1dac983aa8e376c1d005981a8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T18:53:55,230 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/.tmp/info/63953aa6d6c34f3f82d826ec51a56b3e is 1080, key is row0031/info:/1732301633211/Put/seqid=0 2024-11-22T18:53:55,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741839_1015 (size=12509) 2024-11-22T18:53:55,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741839_1015 (size=12509) 2024-11-22T18:53:55,236 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/.tmp/info/63953aa6d6c34f3f82d826ec51a56b3e 2024-11-22T18:53:55,243 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/.tmp/info/63953aa6d6c34f3f82d826ec51a56b3e as hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/info/63953aa6d6c34f3f82d826ec51a56b3e 2024-11-22T18:53:55,249 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/info/63953aa6d6c34f3f82d826ec51a56b3e, entries=7, sequenceid=47, filesize=12.2 K 2024-11-22T18:53:55,250 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for c4f64eb1dac983aa8e376c1d005981a8 in 24ms, sequenceid=47, compaction requested=true 2024-11-22T18:53:55,250 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c4f64eb1dac983aa8e376c1d005981a8: 2024-11-22T18:53:55,250 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=53.5 K, sizeToCheck=16.0 K 2024-11-22T18:53:55,250 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T18:53:55,250 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/info/f4f5824ecf0f4cc4a9556e23f34116c9 because midkey is the same as first or last row 2024-11-22T18:53:55,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c4f64eb1dac983aa8e376c1d005981a8:info, priority=-2147483648, current under compaction store 
size is 1 2024-11-22T18:53:55,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T18:53:55,250 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T18:53:55,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39949 {}] regionserver.HRegion(8855): Flush requested on c4f64eb1dac983aa8e376c1d005981a8 2024-11-22T18:53:55,251 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c4f64eb1dac983aa8e376c1d005981a8 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-22T18:53:55,251 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T18:53:55,252 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HStore(1541): c4f64eb1dac983aa8e376c1d005981a8/info is initiating minor compaction (all files) 2024-11-22T18:53:55,252 INFO [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c4f64eb1dac983aa8e376c1d005981a8/info in TestLogRolling-testLogRolling,,1732301612970.c4f64eb1dac983aa8e376c1d005981a8. 2024-11-22T18:53:55,252 INFO [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/info/d9d0970b80c6453c86f90dcfc7caed55, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/info/f4f5824ecf0f4cc4a9556e23f34116c9, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/info/63953aa6d6c34f3f82d826ec51a56b3e] into tmpdir=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/.tmp, totalSize=53.5 K 2024-11-22T18:53:55,252 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] compactions.Compactor(225): Compacting d9d0970b80c6453c86f90dcfc7caed55, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732301623044 2024-11-22T18:53:55,253 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] compactions.Compactor(225): Compacting f4f5824ecf0f4cc4a9556e23f34116c9, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732301623058 2024-11-22T18:53:55,253 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] compactions.Compactor(225): Compacting 63953aa6d6c34f3f82d826ec51a56b3e, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1732301633211 2024-11-22T18:53:55,255 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/.tmp/info/a48de5a8288242ff939499f37f898429 is 1080, key is row0038/info:/1732301635227/Put/seqid=0 
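Note on the compaction selection logged above: ExploringCompactionPolicy reports picking 3 store files of size 54779 (12509 + 29761 + 12509 bytes, matching the 12.2 K / 29.1 K / 12.2 K store files) "with 1 in ratio". As a rough illustration only, not HBase's actual code (class and method names below are made up, and 1.2 is only the usual default for hbase.hstore.compaction.ratio), a size-ratio check of this kind can be sketched as:

import java.util.List;

// Hypothetical sketch of a ratio-based selection check: a candidate set is
// "in ratio" when every file is no larger than the sum of the other files
// times the configured compaction ratio. Names and the 1.2 default are
// illustrative assumptions, not the HBase API.
final class RatioCheckSketch {
  static boolean inRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Sizes taken from the records above: 12.2 K, 29.1 K and 12.2 K store files.
    List<Long> sizes = List.of(12509L, 29761L, 12509L);
    // Prints true: 29761 <= (12509 + 12509) * 1.2, so all three files compact together.
    System.out.println(inRatio(sizes, 1.2));
  }
}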
2024-11-22T18:53:55,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741840_1016 (size=20064) 2024-11-22T18:53:55,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741840_1016 (size=20064) 2024-11-22T18:53:55,262 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=64 (bloomFilter=true), to=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/.tmp/info/a48de5a8288242ff939499f37f898429 2024-11-22T18:53:55,266 INFO [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c4f64eb1dac983aa8e376c1d005981a8#info#compaction#59 average throughput is 12.66 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T18:53:55,266 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/.tmp/info/8db1816c58684bbd8fafb55e04d7f85c is 1080, key is row0001/info:/1732301623044/Put/seqid=0 2024-11-22T18:53:55,267 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/.tmp/info/a48de5a8288242ff939499f37f898429 as hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/info/a48de5a8288242ff939499f37f898429 2024-11-22T18:53:55,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741841_1017 (size=44978) 2024-11-22T18:53:55,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741841_1017 (size=44978) 2024-11-22T18:53:55,273 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/info/a48de5a8288242ff939499f37f898429, entries=14, sequenceid=64, filesize=19.6 K 2024-11-22T18:53:55,274 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=11.56 KB/11836 for c4f64eb1dac983aa8e376c1d005981a8 in 23ms, sequenceid=64, compaction requested=false 2024-11-22T18:53:55,274 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c4f64eb1dac983aa8e376c1d005981a8: 2024-11-22T18:53:55,274 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=73.1 K, sizeToCheck=16.0 K 2024-11-22T18:53:55,274 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T18:53:55,274 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/info/f4f5824ecf0f4cc4a9556e23f34116c9 because midkey is the same as first or last row 2024-11-22T18:53:55,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39949 {}] regionserver.HRegion(8855): Flush requested on c4f64eb1dac983aa8e376c1d005981a8 2024-11-22T18:53:55,275 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c4f64eb1dac983aa8e376c1d005981a8 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-22T18:53:55,277 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/.tmp/info/8db1816c58684bbd8fafb55e04d7f85c as hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/info/8db1816c58684bbd8fafb55e04d7f85c 2024-11-22T18:53:55,279 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/.tmp/info/c2569680510d45e99ebc1c44afe954f7 is 1080, key is row0052/info:/1732301635252/Put/seqid=0 2024-11-22T18:53:55,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741842_1018 (size=18987) 2024-11-22T18:53:55,284 INFO [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c4f64eb1dac983aa8e376c1d005981a8/info of c4f64eb1dac983aa8e376c1d005981a8 into 8db1816c58684bbd8fafb55e04d7f85c(size=43.9 K), total size for store is 63.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
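Note on the "Committing ... .tmp/info/<file> as ... info/<file>" records above: both the flusher and the compactor first write the new HFile under the region's .tmp directory and then publish it into the column-family directory, so readers never observe a half-written file. A minimal sketch of that write-then-rename step using the Hadoop FileSystem API follows; the class and paths are illustrative, this is not the HRegionFileSystem implementation.

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch of the .tmp -> store-directory commit seen in the
// HRegionFileSystem "Committing ... as ..." log lines. Illustrative only.
final class TmpCommitSketch {
  static Path commitStoreFile(FileSystem fs, Path tmpFile, Path storeDir) throws IOException {
    Path dst = new Path(storeDir, tmpFile.getName());
    // rename() is atomic within an HDFS namespace, so the new HFile becomes
    // visible under info/ all at once or not at all.
    if (!fs.rename(tmpFile, dst)) {
      throw new IOException("Failed to commit " + tmpFile + " as " + dst);
    }
    return dst;
  }
}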
2024-11-22T18:53:55,284 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c4f64eb1dac983aa8e376c1d005981a8: 2024-11-22T18:53:55,284 INFO [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732301612970.c4f64eb1dac983aa8e376c1d005981a8., storeName=c4f64eb1dac983aa8e376c1d005981a8/info, priority=13, startTime=1732301635250; duration=0sec 2024-11-22T18:53:55,284 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=63.5 K, sizeToCheck=16.0 K 2024-11-22T18:53:55,284 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T18:53:55,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741842_1018 (size=18987) 2024-11-22T18:53:55,284 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/info/8db1816c58684bbd8fafb55e04d7f85c because midkey is the same as first or last row 2024-11-22T18:53:55,284 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=80 (bloomFilter=true), to=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/.tmp/info/c2569680510d45e99ebc1c44afe954f7 2024-11-22T18:53:55,284 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=63.5 K, sizeToCheck=16.0 K 2024-11-22T18:53:55,284 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T18:53:55,284 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/info/8db1816c58684bbd8fafb55e04d7f85c because midkey is the same as first or last row 2024-11-22T18:53:55,284 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=63.5 K, sizeToCheck=16.0 K 2024-11-22T18:53:55,284 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T18:53:55,284 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/info/8db1816c58684bbd8fafb55e04d7f85c because midkey is the same as first or last row 2024-11-22T18:53:55,284 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T18:53:55,285 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] 
regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c4f64eb1dac983aa8e376c1d005981a8:info 2024-11-22T18:53:55,288 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/.tmp/info/c2569680510d45e99ebc1c44afe954f7 as hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/info/c2569680510d45e99ebc1c44afe954f7 2024-11-22T18:53:55,293 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/info/c2569680510d45e99ebc1c44afe954f7, entries=13, sequenceid=80, filesize=18.5 K 2024-11-22T18:53:55,294 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=0 B/0 for c4f64eb1dac983aa8e376c1d005981a8 in 18ms, sequenceid=80, compaction requested=true 2024-11-22T18:53:55,294 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c4f64eb1dac983aa8e376c1d005981a8: 2024-11-22T18:53:55,294 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=82.1 K, sizeToCheck=16.0 K 2024-11-22T18:53:55,294 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T18:53:55,294 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/info/8db1816c58684bbd8fafb55e04d7f85c because midkey is the same as first or last row 2024-11-22T18:53:55,294 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c4f64eb1dac983aa8e376c1d005981a8:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T18:53:55,294 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T18:53:55,294 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T18:53:55,295 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 84029 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T18:53:55,295 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HStore(1541): c4f64eb1dac983aa8e376c1d005981a8/info is initiating minor compaction (all files) 2024-11-22T18:53:55,295 INFO [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c4f64eb1dac983aa8e376c1d005981a8/info in TestLogRolling-testLogRolling,,1732301612970.c4f64eb1dac983aa8e376c1d005981a8. 
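Note on the split-policy records above: the decision pairs a size test ("Should split because region size is big enough sumSize=82.1 K, sizeToCheck=16.0 K") with a guard that refuses to split while the largest file's midkey equals its first or last row, since splitting there would leave one daughter empty. A rough, hypothetical sketch of that two-step decision (names and keys are made up; this is not the HBase split-policy API):

import java.nio.charset.StandardCharsets;
import java.util.Arrays;

// Hypothetical sketch of the split decision logged above: size check first,
// then the midkey guard corresponding to the StoreUtils(137) message.
final class SplitDecisionSketch {
  static boolean shouldSplit(long sumStoreSizeBytes, long sizeToCheckBytes,
                             byte[] midKey, byte[] firstKey, byte[] lastKey) {
    if (sumStoreSizeBytes <= sizeToCheckBytes) {
      return false; // region not big enough yet
    }
    // "cannot split ... because midkey is the same as first or last row":
    // a split point at either end would put every row into one daughter.
    if (midKey == null
        || Arrays.equals(midKey, firstKey)
        || Arrays.equals(midKey, lastKey)) {
      return false;
    }
    return true;
  }

  public static void main(String[] args) {
    byte[] first = "rowA".getBytes(StandardCharsets.UTF_8);
    byte[] last = "rowZ".getBytes(StandardCharsets.UTF_8);
    byte[] mid = "rowM".getBytes(StandardCharsets.UTF_8);
    // With the sizes from the log (84029 bytes vs. a 16 K check) the size test
    // passes, so the outcome hinges on the midkey guard.
    System.out.println(shouldSplit(84029L, 16384L, first, first, last)); // false
    System.out.println(shouldSplit(84029L, 16384L, mid, first, last));   // true
  }
}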
2024-11-22T18:53:55,295 INFO [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/info/8db1816c58684bbd8fafb55e04d7f85c, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/info/a48de5a8288242ff939499f37f898429, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/info/c2569680510d45e99ebc1c44afe954f7] into tmpdir=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/.tmp, totalSize=82.1 K 2024-11-22T18:53:55,295 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8db1816c58684bbd8fafb55e04d7f85c, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1732301623044 2024-11-22T18:53:55,296 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] compactions.Compactor(225): Compacting a48de5a8288242ff939499f37f898429, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=64, earliestPutTs=1732301635227 2024-11-22T18:53:55,296 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] compactions.Compactor(225): Compacting c2569680510d45e99ebc1c44afe954f7, keycount=13, bloomtype=ROW, size=18.5 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1732301635252 2024-11-22T18:53:55,306 INFO [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c4f64eb1dac983aa8e376c1d005981a8#info#compaction#61 average throughput is 32.84 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T18:53:55,306 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/.tmp/info/4c84add3851d4757b1771807dceb6fbf is 1080, key is row0001/info:/1732301623044/Put/seqid=0 2024-11-22T18:53:55,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741843_1019 (size=74301) 2024-11-22T18:53:55,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741843_1019 (size=74301) 2024-11-22T18:53:55,315 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/.tmp/info/4c84add3851d4757b1771807dceb6fbf as hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/info/4c84add3851d4757b1771807dceb6fbf 2024-11-22T18:53:55,320 INFO [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c4f64eb1dac983aa8e376c1d005981a8/info of c4f64eb1dac983aa8e376c1d005981a8 into 4c84add3851d4757b1771807dceb6fbf(size=72.6 K), total size for store is 72.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T18:53:55,320 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c4f64eb1dac983aa8e376c1d005981a8: 2024-11-22T18:53:55,320 INFO [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732301612970.c4f64eb1dac983aa8e376c1d005981a8., storeName=c4f64eb1dac983aa8e376c1d005981a8/info, priority=13, startTime=1732301635294; duration=0sec 2024-11-22T18:53:55,320 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=72.6 K, sizeToCheck=16.0 K 2024-11-22T18:53:55,320 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T18:53:55,321 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=72.6 K, sizeToCheck=16.0 K 2024-11-22T18:53:55,321 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T18:53:55,321 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=72.6 K, sizeToCheck=16.0 K 2024-11-22T18:53:55,321 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T18:53:55,322 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1732301612970.c4f64eb1dac983aa8e376c1d005981a8., 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T18:53:55,322 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T18:53:55,322 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c4f64eb1dac983aa8e376c1d005981a8:info 2024-11-22T18:53:55,323 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42005 {}] assignment.AssignmentManager(1363): Split request from d79ba0c344fb,39949,1732301611934, parent={ENCODED => c4f64eb1dac983aa8e376c1d005981a8, NAME => 'TestLogRolling-testLogRolling,,1732301612970.c4f64eb1dac983aa8e376c1d005981a8.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-22T18:53:55,328 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42005 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=d79ba0c344fb,39949,1732301611934 2024-11-22T18:53:55,332 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42005 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=c4f64eb1dac983aa8e376c1d005981a8, daughterA=d4a9db853f3c66542587444426dd218d, daughterB=eb2200d179c5d39808e2d3701a1d9793 2024-11-22T18:53:55,333 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=c4f64eb1dac983aa8e376c1d005981a8, daughterA=d4a9db853f3c66542587444426dd218d, daughterB=eb2200d179c5d39808e2d3701a1d9793 2024-11-22T18:53:55,333 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=c4f64eb1dac983aa8e376c1d005981a8, daughterA=d4a9db853f3c66542587444426dd218d, daughterB=eb2200d179c5d39808e2d3701a1d9793 2024-11-22T18:53:55,333 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=c4f64eb1dac983aa8e376c1d005981a8, daughterA=d4a9db853f3c66542587444426dd218d, daughterB=eb2200d179c5d39808e2d3701a1d9793 2024-11-22T18:53:55,341 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c4f64eb1dac983aa8e376c1d005981a8, UNASSIGN}] 2024-11-22T18:53:55,342 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c4f64eb1dac983aa8e376c1d005981a8, UNASSIGN 2024-11-22T18:53:55,343 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=c4f64eb1dac983aa8e376c1d005981a8, regionState=CLOSING, regionLocation=d79ba0c344fb,39949,1732301611934 2024-11-22T18:53:55,346 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, 
hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c4f64eb1dac983aa8e376c1d005981a8, UNASSIGN because future has completed 2024-11-22T18:53:55,346 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-22T18:53:55,346 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure c4f64eb1dac983aa8e376c1d005981a8, server=d79ba0c344fb,39949,1732301611934}] 2024-11-22T18:53:55,422 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:55,422 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:55,504 INFO [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close c4f64eb1dac983aa8e376c1d005981a8 2024-11-22T18:53:55,504 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-22T18:53:55,505 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing c4f64eb1dac983aa8e376c1d005981a8, disabling compactions & flushes 2024-11-22T18:53:55,505 INFO [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732301612970.c4f64eb1dac983aa8e376c1d005981a8. 2024-11-22T18:53:55,505 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732301612970.c4f64eb1dac983aa8e376c1d005981a8. 2024-11-22T18:53:55,505 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732301612970.c4f64eb1dac983aa8e376c1d005981a8. after waiting 0 ms 2024-11-22T18:53:55,505 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732301612970.c4f64eb1dac983aa8e376c1d005981a8. 
2024-11-22T18:53:55,506 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732301612970.c4f64eb1dac983aa8e376c1d005981a8.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/info/d9d0970b80c6453c86f90dcfc7caed55, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/info/f4f5824ecf0f4cc4a9556e23f34116c9, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/info/8db1816c58684bbd8fafb55e04d7f85c, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/info/63953aa6d6c34f3f82d826ec51a56b3e, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/info/a48de5a8288242ff939499f37f898429, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/info/c2569680510d45e99ebc1c44afe954f7] to archive 2024-11-22T18:53:55,507 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732301612970.c4f64eb1dac983aa8e376c1d005981a8.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-22T18:53:55,509 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732301612970.c4f64eb1dac983aa8e376c1d005981a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/info/d9d0970b80c6453c86f90dcfc7caed55 to hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/archive/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/info/d9d0970b80c6453c86f90dcfc7caed55 2024-11-22T18:53:55,511 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732301612970.c4f64eb1dac983aa8e376c1d005981a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/info/f4f5824ecf0f4cc4a9556e23f34116c9 to hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/archive/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/info/f4f5824ecf0f4cc4a9556e23f34116c9 2024-11-22T18:53:55,512 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732301612970.c4f64eb1dac983aa8e376c1d005981a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/info/8db1816c58684bbd8fafb55e04d7f85c to hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/archive/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/info/8db1816c58684bbd8fafb55e04d7f85c 2024-11-22T18:53:55,513 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732301612970.c4f64eb1dac983aa8e376c1d005981a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/info/63953aa6d6c34f3f82d826ec51a56b3e to hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/archive/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/info/63953aa6d6c34f3f82d826ec51a56b3e 2024-11-22T18:53:55,514 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732301612970.c4f64eb1dac983aa8e376c1d005981a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/info/a48de5a8288242ff939499f37f898429 to hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/archive/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/info/a48de5a8288242ff939499f37f898429 2024-11-22T18:53:55,515 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732301612970.c4f64eb1dac983aa8e376c1d005981a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/info/c2569680510d45e99ebc1c44afe954f7 to hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/archive/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/info/c2569680510d45e99ebc1c44afe954f7 2024-11-22T18:53:55,522 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/recovered.edits/85.seqid, newMaxSeqId=85, maxSeqId=1 2024-11-22T18:53:55,523 INFO [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732301612970.c4f64eb1dac983aa8e376c1d005981a8. 
2024-11-22T18:53:55,524 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for c4f64eb1dac983aa8e376c1d005981a8: Waiting for close lock at 1732301635504Running coprocessor pre-close hooks at 1732301635504Disabling compacts and flushes for region at 1732301635504Disabling writes for close at 1732301635505 (+1 ms)Writing region close event to WAL at 1732301635518 (+13 ms)Running coprocessor post-close hooks at 1732301635523 (+5 ms)Closed at 1732301635523 2024-11-22T18:53:55,526 INFO [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed c4f64eb1dac983aa8e376c1d005981a8 2024-11-22T18:53:55,527 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=c4f64eb1dac983aa8e376c1d005981a8, regionState=CLOSED 2024-11-22T18:53:55,529 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure c4f64eb1dac983aa8e376c1d005981a8, server=d79ba0c344fb,39949,1732301611934 because future has completed 2024-11-22T18:53:55,534 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-22T18:53:55,534 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure c4f64eb1dac983aa8e376c1d005981a8, server=d79ba0c344fb,39949,1732301611934 in 185 msec 2024-11-22T18:53:55,537 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-22T18:53:55,537 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c4f64eb1dac983aa8e376c1d005981a8, UNASSIGN in 194 msec 2024-11-22T18:53:55,546 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:53:55,549 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 1 storefiles, region=c4f64eb1dac983aa8e376c1d005981a8, threads=1 2024-11-22T18:53:55,551 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/info/4c84add3851d4757b1771807dceb6fbf for region: c4f64eb1dac983aa8e376c1d005981a8 2024-11-22T18:53:55,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741844_1020 (size=27) 2024-11-22T18:53:55,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741844_1020 (size=27) 2024-11-22T18:53:55,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741845_1021 (size=27) 2024-11-22T18:53:55,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741845_1021 (size=27) 2024-11-22T18:53:55,575 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): 
pid=7 splitting complete for store file: hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/info/4c84add3851d4757b1771807dceb6fbf for region: c4f64eb1dac983aa8e376c1d005981a8 2024-11-22T18:53:55,577 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region c4f64eb1dac983aa8e376c1d005981a8 Daughter A: [hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/d4a9db853f3c66542587444426dd218d/info/4c84add3851d4757b1771807dceb6fbf.c4f64eb1dac983aa8e376c1d005981a8] storefiles, Daughter B: [hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/4c84add3851d4757b1771807dceb6fbf.c4f64eb1dac983aa8e376c1d005981a8] storefiles. 2024-11-22T18:53:55,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741846_1022 (size=71) 2024-11-22T18:53:55,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741846_1022 (size=71) 2024-11-22T18:53:55,586 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:53:55,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741847_1023 (size=71) 2024-11-22T18:53:55,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741847_1023 (size=71) 2024-11-22T18:53:55,598 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:53:55,606 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/d4a9db853f3c66542587444426dd218d/recovered.edits/85.seqid, newMaxSeqId=85, maxSeqId=-1 2024-11-22T18:53:55,607 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/recovered.edits/85.seqid, newMaxSeqId=85, maxSeqId=-1 2024-11-22T18:53:55,610 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1732301612970.c4f64eb1dac983aa8e376c1d005981a8.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1732301635609"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1732301635609"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1732301635609"}]},"ts":"1732301635609"} 2024-11-22T18:53:55,610 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1732301635328.d4a9db853f3c66542587444426dd218d.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1732301635609"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732301635609"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732301635609"}]},"ts":"1732301635609"} 
2024-11-22T18:53:55,610 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1732301635609"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732301635609"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732301635609"}]},"ts":"1732301635609"} 2024-11-22T18:53:55,629 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d4a9db853f3c66542587444426dd218d, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=eb2200d179c5d39808e2d3701a1d9793, ASSIGN}] 2024-11-22T18:53:55,631 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d4a9db853f3c66542587444426dd218d, ASSIGN 2024-11-22T18:53:55,631 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=eb2200d179c5d39808e2d3701a1d9793, ASSIGN 2024-11-22T18:53:55,631 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d4a9db853f3c66542587444426dd218d, ASSIGN; state=SPLITTING_NEW, location=d79ba0c344fb,39949,1732301611934; forceNewPlan=false, retain=false 2024-11-22T18:53:55,632 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=eb2200d179c5d39808e2d3701a1d9793, ASSIGN; state=SPLITTING_NEW, location=d79ba0c344fb,39949,1732301611934; forceNewPlan=false, retain=false 2024-11-22T18:53:55,782 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=d4a9db853f3c66542587444426dd218d, regionState=OPENING, regionLocation=d79ba0c344fb,39949,1732301611934 2024-11-22T18:53:55,782 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=eb2200d179c5d39808e2d3701a1d9793, regionState=OPENING, regionLocation=d79ba0c344fb,39949,1732301611934 2024-11-22T18:53:55,785 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d4a9db853f3c66542587444426dd218d, ASSIGN because future has completed 2024-11-22T18:53:55,786 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure d4a9db853f3c66542587444426dd218d, server=d79ba0c344fb,39949,1732301611934}] 2024-11-22T18:53:55,786 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] 
procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=eb2200d179c5d39808e2d3701a1d9793, ASSIGN because future has completed 2024-11-22T18:53:55,787 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure eb2200d179c5d39808e2d3701a1d9793, server=d79ba0c344fb,39949,1732301611934}] 2024-11-22T18:53:55,943 INFO [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793. 2024-11-22T18:53:55,943 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => eb2200d179c5d39808e2d3701a1d9793, NAME => 'TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-22T18:53:55,943 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling eb2200d179c5d39808e2d3701a1d9793 2024-11-22T18:53:55,943 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T18:53:55,943 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for eb2200d179c5d39808e2d3701a1d9793 2024-11-22T18:53:55,943 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for eb2200d179c5d39808e2d3701a1d9793 2024-11-22T18:53:55,945 INFO [StoreOpener-eb2200d179c5d39808e2d3701a1d9793-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region eb2200d179c5d39808e2d3701a1d9793 2024-11-22T18:53:55,947 INFO [StoreOpener-eb2200d179c5d39808e2d3701a1d9793-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region eb2200d179c5d39808e2d3701a1d9793 columnFamilyName info 2024-11-22T18:53:55,947 DEBUG [StoreOpener-eb2200d179c5d39808e2d3701a1d9793-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T18:53:55,960 DEBUG [StoreOpener-eb2200d179c5d39808e2d3701a1d9793-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/4c84add3851d4757b1771807dceb6fbf.c4f64eb1dac983aa8e376c1d005981a8->hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/info/4c84add3851d4757b1771807dceb6fbf-top 2024-11-22T18:53:55,961 INFO [StoreOpener-eb2200d179c5d39808e2d3701a1d9793-1 {}] regionserver.HStore(327): Store=eb2200d179c5d39808e2d3701a1d9793/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T18:53:55,961 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for eb2200d179c5d39808e2d3701a1d9793 2024-11-22T18:53:55,962 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793 2024-11-22T18:53:55,963 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793 2024-11-22T18:53:55,964 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for eb2200d179c5d39808e2d3701a1d9793 2024-11-22T18:53:55,964 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for eb2200d179c5d39808e2d3701a1d9793 2024-11-22T18:53:55,966 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for eb2200d179c5d39808e2d3701a1d9793 2024-11-22T18:53:55,967 INFO [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened eb2200d179c5d39808e2d3701a1d9793; next sequenceid=86; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=714360, jitterRate=-0.09164497256278992}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T18:53:55,968 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for eb2200d179c5d39808e2d3701a1d9793 2024-11-22T18:53:55,968 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for eb2200d179c5d39808e2d3701a1d9793: Running coprocessor pre-open hook at 1732301635943Writing region info on filesystem at 1732301635944 (+1 ms)Initializing all the Stores at 1732301635944Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', 
BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732301635945 (+1 ms)Cleaning up temporary data from old regions at 1732301635964 (+19 ms)Running coprocessor post-open hooks at 1732301635968 (+4 ms)Region opened successfully at 1732301635968 2024-11-22T18:53:55,970 INFO [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793., pid=13, masterSystemTime=1732301635938 2024-11-22T18:53:55,970 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store eb2200d179c5d39808e2d3701a1d9793:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T18:53:55,970 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-22T18:53:55,970 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T18:53:55,971 INFO [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793. 2024-11-22T18:53:55,971 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HStore(1541): eb2200d179c5d39808e2d3701a1d9793/info is initiating minor compaction (all files) 2024-11-22T18:53:55,971 INFO [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of eb2200d179c5d39808e2d3701a1d9793/info in TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793. 2024-11-22T18:53:55,972 INFO [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/4c84add3851d4757b1771807dceb6fbf.c4f64eb1dac983aa8e376c1d005981a8->hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/info/4c84add3851d4757b1771807dceb6fbf-top] into tmpdir=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp, totalSize=72.6 K 2024-11-22T18:53:55,973 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793. 2024-11-22T18:53:55,973 INFO [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793. 
2024-11-22T18:53:55,973 INFO [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1732301635328.d4a9db853f3c66542587444426dd218d.
2024-11-22T18:53:55,973 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => d4a9db853f3c66542587444426dd218d, NAME => 'TestLogRolling-testLogRolling,,1732301635328.d4a9db853f3c66542587444426dd218d.', STARTKEY => '', ENDKEY => 'row0062'}
2024-11-22T18:53:55,973 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4c84add3851d4757b1771807dceb6fbf.c4f64eb1dac983aa8e376c1d005981a8, keycount=32, bloomtype=ROW, size=72.6 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1732301623044
2024-11-22T18:53:55,974 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=eb2200d179c5d39808e2d3701a1d9793, regionState=OPEN, openSeqNum=86, regionLocation=d79ba0c344fb,39949,1732301611934
2024-11-22T18:53:55,974 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling d4a9db853f3c66542587444426dd218d
2024-11-22T18:53:55,974 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732301635328.d4a9db853f3c66542587444426dd218d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-22T18:53:55,975 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for d4a9db853f3c66542587444426dd218d
2024-11-22T18:53:55,975 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for d4a9db853f3c66542587444426dd218d
2024-11-22T18:53:55,976 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39949 {}] regionserver.HRegion(8855): Flush requested on 1588230740
2024-11-22T18:53:55,976 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all.
2024-11-22T18:53:55,976 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.15 KB heapSize=9 KB 2024-11-22T18:53:55,976 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure eb2200d179c5d39808e2d3701a1d9793, server=d79ba0c344fb,39949,1732301611934 because future has completed 2024-11-22T18:53:55,985 INFO [StoreOpener-d4a9db853f3c66542587444426dd218d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region d4a9db853f3c66542587444426dd218d 2024-11-22T18:53:55,986 INFO [StoreOpener-d4a9db853f3c66542587444426dd218d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d4a9db853f3c66542587444426dd218d columnFamilyName info 2024-11-22T18:53:55,986 DEBUG [StoreOpener-d4a9db853f3c66542587444426dd218d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:53:55,989 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=11 2024-11-22T18:53:55,989 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure eb2200d179c5d39808e2d3701a1d9793, server=d79ba0c344fb,39949,1732301611934 in 199 msec 2024-11-22T18:53:55,991 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=eb2200d179c5d39808e2d3701a1d9793, ASSIGN in 360 msec 2024-11-22T18:53:56,002 INFO [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): eb2200d179c5d39808e2d3701a1d9793#info#compaction#62 average throughput is 3.08 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T18:53:56,002 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/c1c6ca6e80694a4090eb0aae45a9eb56 is 1080, key is row0062/info:/1732301635270/Put/seqid=0 2024-11-22T18:53:56,004 DEBUG [StoreOpener-d4a9db853f3c66542587444426dd218d-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/d4a9db853f3c66542587444426dd218d/info/4c84add3851d4757b1771807dceb6fbf.c4f64eb1dac983aa8e376c1d005981a8->hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/info/4c84add3851d4757b1771807dceb6fbf-bottom 2024-11-22T18:53:56,005 INFO [StoreOpener-d4a9db853f3c66542587444426dd218d-1 {}] regionserver.HStore(327): Store=d4a9db853f3c66542587444426dd218d/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T18:53:56,005 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for d4a9db853f3c66542587444426dd218d 2024-11-22T18:53:56,006 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/d4a9db853f3c66542587444426dd218d 2024-11-22T18:53:56,008 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/d4a9db853f3c66542587444426dd218d 2024-11-22T18:53:56,008 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for d4a9db853f3c66542587444426dd218d 2024-11-22T18:53:56,008 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for d4a9db853f3c66542587444426dd218d 2024-11-22T18:53:56,010 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/hbase/meta/1588230740/.tmp/info/e9f37c5a267c480c94966db53b96e7f4 is 193, key is TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793./info:regioninfo/1732301635973/Put/seqid=0 2024-11-22T18:53:56,011 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for d4a9db853f3c66542587444426dd218d 2024-11-22T18:53:56,012 INFO [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened d4a9db853f3c66542587444426dd218d; next sequenceid=86; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=760418, 
jitterRate=-0.03307950496673584}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T18:53:56,012 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d4a9db853f3c66542587444426dd218d 2024-11-22T18:53:56,012 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for d4a9db853f3c66542587444426dd218d: Running coprocessor pre-open hook at 1732301635975Writing region info on filesystem at 1732301635975Initializing all the Stores at 1732301635977 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732301635977Cleaning up temporary data from old regions at 1732301636008 (+31 ms)Running coprocessor post-open hooks at 1732301636012 (+4 ms)Region opened successfully at 1732301636012 2024-11-22T18:53:56,013 INFO [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1732301635328.d4a9db853f3c66542587444426dd218d., pid=12, masterSystemTime=1732301635938 2024-11-22T18:53:56,013 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store d4a9db853f3c66542587444426dd218d:info, priority=-2147483648, current under compaction store size is 2 2024-11-22T18:53:56,013 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T18:53:56,014 DEBUG [RS:0;d79ba0c344fb:39949-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-22T18:53:56,014 INFO [RS:0;d79ba0c344fb:39949-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1732301635328.d4a9db853f3c66542587444426dd218d. 2024-11-22T18:53:56,014 DEBUG [RS:0;d79ba0c344fb:39949-longCompactions-0 {}] regionserver.HStore(1541): d4a9db853f3c66542587444426dd218d/info is initiating minor compaction (all files) 2024-11-22T18:53:56,014 INFO [RS:0;d79ba0c344fb:39949-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of d4a9db853f3c66542587444426dd218d/info in TestLogRolling-testLogRolling,,1732301635328.d4a9db853f3c66542587444426dd218d. 
2024-11-22T18:53:56,015 INFO [RS:0;d79ba0c344fb:39949-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/d4a9db853f3c66542587444426dd218d/info/4c84add3851d4757b1771807dceb6fbf.c4f64eb1dac983aa8e376c1d005981a8->hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/info/4c84add3851d4757b1771807dceb6fbf-bottom] into tmpdir=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/d4a9db853f3c66542587444426dd218d/.tmp, totalSize=72.6 K
2024-11-22T18:53:56,016 DEBUG [RS:0;d79ba0c344fb:39949-longCompactions-0 {}] compactions.Compactor(225): Compacting 4c84add3851d4757b1771807dceb6fbf.c4f64eb1dac983aa8e376c1d005981a8, keycount=32, bloomtype=ROW, size=72.6 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1732301623044
2024-11-22T18:53:56,017 DEBUG [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1732301635328.d4a9db853f3c66542587444426dd218d.
2024-11-22T18:53:56,017 INFO [RS_OPEN_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1732301635328.d4a9db853f3c66542587444426dd218d.
2024-11-22T18:53:56,017 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=d4a9db853f3c66542587444426dd218d, regionState=OPEN, openSeqNum=86, regionLocation=d79ba0c344fb,39949,1732301611934
2024-11-22T18:53:56,020 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure d4a9db853f3c66542587444426dd218d, server=d79ba0c344fb,39949,1732301611934 because future has completed
2024-11-22T18:53:56,024 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=10
2024-11-22T18:53:56,024 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure d4a9db853f3c66542587444426dd218d, server=d79ba0c344fb,39949,1732301611934 in 237 msec
2024-11-22T18:53:56,027 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=7
2024-11-22T18:53:56,027 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d4a9db853f3c66542587444426dd218d, ASSIGN in 395 msec
2024-11-22T18:53:56,029 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=c4f64eb1dac983aa8e376c1d005981a8, daughterA=d4a9db853f3c66542587444426dd218d, daughterB=eb2200d179c5d39808e2d3701a1d9793 in 699 msec
2024-11-22T18:53:56,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741848_1024 (size=8260)
2024-11-22T18:53:56,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741848_1024 (size=8260)
2024-11-22T18:53:56,050 INFO [RS:0;d79ba0c344fb:39949-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d4a9db853f3c66542587444426dd218d#info#compaction#64 average throughput is 20.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-22T18:53:56,051 DEBUG [RS:0;d79ba0c344fb:39949-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/d4a9db853f3c66542587444426dd218d/.tmp/info/7d61ec2c97f14def8a272c6c5c5598c6 is 1080, key is row0001/info:/1732301623044/Put/seqid=0
2024-11-22T18:53:56,055 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/c1c6ca6e80694a4090eb0aae45a9eb56 as hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/c1c6ca6e80694a4090eb0aae45a9eb56
2024-11-22T18:53:56,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741849_1025 (size=9882)
2024-11-22T18:53:56,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741849_1025 (size=9882)
2024-11-22T18:53:56,060 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.95 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/hbase/meta/1588230740/.tmp/info/e9f37c5a267c480c94966db53b96e7f4
2024-11-22T18:53:56,062 INFO [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in eb2200d179c5d39808e2d3701a1d9793/info of eb2200d179c5d39808e2d3701a1d9793 into c1c6ca6e80694a4090eb0aae45a9eb56(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-22T18:53:56,062 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for eb2200d179c5d39808e2d3701a1d9793:
2024-11-22T18:53:56,062 INFO [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793., storeName=eb2200d179c5d39808e2d3701a1d9793/info, priority=15, startTime=1732301635970; duration=0sec
2024-11-22T18:53:56,062 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-22T18:53:56,062 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eb2200d179c5d39808e2d3701a1d9793:info
2024-11-22T18:53:56,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741850_1026 (size=70862)
2024-11-22T18:53:56,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741850_1026 (size=70862)
2024-11-22T18:53:56,074 DEBUG [RS:0;d79ba0c344fb:39949-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/d4a9db853f3c66542587444426dd218d/.tmp/info/7d61ec2c97f14def8a272c6c5c5598c6 as hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/d4a9db853f3c66542587444426dd218d/info/7d61ec2c97f14def8a272c6c5c5598c6
2024-11-22T18:53:56,081 INFO [RS:0;d79ba0c344fb:39949-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in d4a9db853f3c66542587444426dd218d/info of d4a9db853f3c66542587444426dd218d into 7d61ec2c97f14def8a272c6c5c5598c6(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-22T18:53:56,081 DEBUG [RS:0;d79ba0c344fb:39949-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for d4a9db853f3c66542587444426dd218d: 2024-11-22T18:53:56,081 INFO [RS:0;d79ba0c344fb:39949-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732301635328.d4a9db853f3c66542587444426dd218d., storeName=d4a9db853f3c66542587444426dd218d/info, priority=15, startTime=1732301636013; duration=0sec 2024-11-22T18:53:56,081 DEBUG [RS:0;d79ba0c344fb:39949-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T18:53:56,081 DEBUG [RS:0;d79ba0c344fb:39949-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d4a9db853f3c66542587444426dd218d:info 2024-11-22T18:53:56,083 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/hbase/meta/1588230740/.tmp/ns/426a35fe2f67475c903e7a2220c1084f is 43, key is default/ns:d/1732301612849/Put/seqid=0 2024-11-22T18:53:56,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741851_1027 (size=5153) 2024-11-22T18:53:56,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741851_1027 (size=5153) 2024-11-22T18:53:56,089 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/hbase/meta/1588230740/.tmp/ns/426a35fe2f67475c903e7a2220c1084f 2024-11-22T18:53:56,112 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/hbase/meta/1588230740/.tmp/table/94effead3c2c4f26b30a5bae3e4cdd59 is 65, key is TestLogRolling-testLogRolling/table:state/1732301613343/Put/seqid=0 2024-11-22T18:53:56,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741852_1028 (size=5340) 2024-11-22T18:53:56,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741852_1028 (size=5340) 2024-11-22T18:53:56,120 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/hbase/meta/1588230740/.tmp/table/94effead3c2c4f26b30a5bae3e4cdd59 2024-11-22T18:53:56,135 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/hbase/meta/1588230740/.tmp/info/e9f37c5a267c480c94966db53b96e7f4 as hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/hbase/meta/1588230740/info/e9f37c5a267c480c94966db53b96e7f4 2024-11-22T18:53:56,141 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/hbase/meta/1588230740/info/e9f37c5a267c480c94966db53b96e7f4, 
entries=30, sequenceid=17, filesize=9.7 K 2024-11-22T18:53:56,143 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/hbase/meta/1588230740/.tmp/ns/426a35fe2f67475c903e7a2220c1084f as hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/hbase/meta/1588230740/ns/426a35fe2f67475c903e7a2220c1084f 2024-11-22T18:53:56,149 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/hbase/meta/1588230740/ns/426a35fe2f67475c903e7a2220c1084f, entries=2, sequenceid=17, filesize=5.0 K 2024-11-22T18:53:56,150 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/hbase/meta/1588230740/.tmp/table/94effead3c2c4f26b30a5bae3e4cdd59 as hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/hbase/meta/1588230740/table/94effead3c2c4f26b30a5bae3e4cdd59 2024-11-22T18:53:56,155 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/hbase/meta/1588230740/table/94effead3c2c4f26b30a5bae3e4cdd59, entries=2, sequenceid=17, filesize=5.2 K 2024-11-22T18:53:56,157 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.15 KB/5269, heapSize ~8.70 KB/8912, currentSize=670 B/670 for 1588230740 in 180ms, sequenceid=17, compaction requested=false 2024-11-22T18:53:56,157 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-22T18:53:56,423 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:56,423 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:57,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39949 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:38364 deadline: 1732301647277, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732301612970.c4f64eb1dac983aa8e376c1d005981a8. 
is not online on d79ba0c344fb,39949,1732301611934 2024-11-22T18:53:57,278 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1732301612970.c4f64eb1dac983aa8e376c1d005981a8., hostname=d79ba0c344fb,39949,1732301611934, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1732301612970.c4f64eb1dac983aa8e376c1d005981a8., hostname=d79ba0c344fb,39949,1732301611934, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732301612970.c4f64eb1dac983aa8e376c1d005981a8. is not online on d79ba0c344fb,39949,1732301611934 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T18:53:57,278 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1732301612970.c4f64eb1dac983aa8e376c1d005981a8., hostname=d79ba0c344fb,39949,1732301611934, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732301612970.c4f64eb1dac983aa8e376c1d005981a8. is not online on d79ba0c344fb,39949,1732301611934 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T18:53:57,278 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1732301612970.c4f64eb1dac983aa8e376c1d005981a8., hostname=d79ba0c344fb,39949,1732301611934, seqNum=2 from cache 2024-11-22T18:53:57,424 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:57,424 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:58,425 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:58,424 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:59,425 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:53:59,425 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:00,426 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:00,426 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:00,524 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:54:00,524 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:54:00,524 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:54:00,524 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:54:00,525 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:54:00,525 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:54:00,525 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:54:00,525 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:54:00,545 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:54:00,545 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:54:00,546 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:54:00,546 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:54:00,546 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:54:00,546 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:54:00,550 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:54:00,550 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:54:00,550 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:54:00,552 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:54:01,061 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-22T18:54:01,062 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:54:01,062 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:54:01,063 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:54:01,063 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:54:01,063 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:54:01,064 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:54:01,065 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:54:01,065 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:54:01,094 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:54:01,094 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:54:01,095 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:54:01,095 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:54:01,095 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:54:01,096 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:54:01,098 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:54:01,099 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:54:01,099 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:54:01,101 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T18:54:01,426 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T18:54:01,426 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:01,860 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-22T18:54:02,427 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:02,427 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:03,428 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:03,428 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:04,428 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:04,428 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:05,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:05,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:06,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:06,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:07,386 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0065', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793., hostname=d79ba0c344fb,39949,1732301611934, seqNum=86] 2024-11-22T18:54:07,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39949 {}] regionserver.HRegion(8855): Flush requested on eb2200d179c5d39808e2d3701a1d9793 2024-11-22T18:54:07,399 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing eb2200d179c5d39808e2d3701a1d9793 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T18:54:07,403 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/4c49c4a8d0dd481ea2abf8e77746d507 is 1080, key is row0065/info:/1732301647387/Put/seqid=0 2024-11-22T18:54:07,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741853_1029 (size=12509) 2024-11-22T18:54:07,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741853_1029 (size=12509) 2024-11-22T18:54:07,416 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/4c49c4a8d0dd481ea2abf8e77746d507 2024-11-22T18:54:07,423 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/4c49c4a8d0dd481ea2abf8e77746d507 as hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/4c49c4a8d0dd481ea2abf8e77746d507 2024-11-22T18:54:07,429 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/4c49c4a8d0dd481ea2abf8e77746d507, entries=7, sequenceid=96, filesize=12.2 K 2024-11-22T18:54:07,430 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=17.86 KB/18292 for eb2200d179c5d39808e2d3701a1d9793 in 31ms, sequenceid=96, compaction requested=false 2024-11-22T18:54:07,430 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for eb2200d179c5d39808e2d3701a1d9793: 2024-11-22T18:54:07,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T18:54:07,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T18:54:07,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39949 {}] regionserver.HRegion(8855): Flush requested on eb2200d179c5d39808e2d3701a1d9793 2024-11-22T18:54:07,431 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing eb2200d179c5d39808e2d3701a1d9793 1/1 column families, dataSize=18.91 KB heapSize=20.50 KB 2024-11-22T18:54:07,435 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/7b003ec62e014d909409887ad666dd65 is 1080, key is row0072/info:/1732301647400/Put/seqid=0 2024-11-22T18:54:07,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741854_1030 (size=24376) 2024-11-22T18:54:07,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741854_1030 (size=24376) 2024-11-22T18:54:07,442 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=18.91 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/7b003ec62e014d909409887ad666dd65 2024-11-22T18:54:07,448 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/7b003ec62e014d909409887ad666dd65 as hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/7b003ec62e014d909409887ad666dd65 2024-11-22T18:54:07,453 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/7b003ec62e014d909409887ad666dd65, entries=18, sequenceid=117, filesize=23.8 K 2024-11-22T18:54:07,454 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~18.91 KB/19368, heapSize ~20.48 KB/20976, currentSize=7.36 KB/7532 for eb2200d179c5d39808e2d3701a1d9793 in 23ms, sequenceid=117, compaction requested=true 2024-11-22T18:54:07,454 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for eb2200d179c5d39808e2d3701a1d9793: 2024-11-22T18:54:07,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eb2200d179c5d39808e2d3701a1d9793:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T18:54:07,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T18:54:07,454 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T18:54:07,455 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 45145 starting at candidate #0 after 
considering 1 permutations with 1 in ratio 2024-11-22T18:54:07,455 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HStore(1541): eb2200d179c5d39808e2d3701a1d9793/info is initiating minor compaction (all files) 2024-11-22T18:54:07,455 INFO [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of eb2200d179c5d39808e2d3701a1d9793/info in TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793. 2024-11-22T18:54:07,455 INFO [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/c1c6ca6e80694a4090eb0aae45a9eb56, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/4c49c4a8d0dd481ea2abf8e77746d507, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/7b003ec62e014d909409887ad666dd65] into tmpdir=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp, totalSize=44.1 K 2024-11-22T18:54:07,456 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] compactions.Compactor(225): Compacting c1c6ca6e80694a4090eb0aae45a9eb56, keycount=3, bloomtype=ROW, size=8.1 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1732301635270 2024-11-22T18:54:07,456 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4c49c4a8d0dd481ea2abf8e77746d507, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1732301647387 2024-11-22T18:54:07,457 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7b003ec62e014d909409887ad666dd65, keycount=18, bloomtype=ROW, size=23.8 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732301647400 2024-11-22T18:54:07,466 INFO [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): eb2200d179c5d39808e2d3701a1d9793#info#compaction#69 average throughput is 28.73 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T18:54:07,467 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/8c14311808fe425986154cfbd713ca2b is 1080, key is row0062/info:/1732301635270/Put/seqid=0 2024-11-22T18:54:07,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741855_1031 (size=35349) 2024-11-22T18:54:07,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741855_1031 (size=35349) 2024-11-22T18:54:07,477 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/8c14311808fe425986154cfbd713ca2b as hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/8c14311808fe425986154cfbd713ca2b 2024-11-22T18:54:07,482 INFO [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in eb2200d179c5d39808e2d3701a1d9793/info of eb2200d179c5d39808e2d3701a1d9793 into 8c14311808fe425986154cfbd713ca2b(size=34.5 K), total size for store is 34.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T18:54:07,482 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for eb2200d179c5d39808e2d3701a1d9793: 2024-11-22T18:54:07,482 INFO [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793., storeName=eb2200d179c5d39808e2d3701a1d9793/info, priority=13, startTime=1732301647454; duration=0sec 2024-11-22T18:54:07,482 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T18:54:07,482 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eb2200d179c5d39808e2d3701a1d9793:info 2024-11-22T18:54:08,431 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:08,431 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:09,431 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:09,431 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:09,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39949 {}] regionserver.HRegion(8855): Flush requested on eb2200d179c5d39808e2d3701a1d9793 2024-11-22T18:54:09,447 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing eb2200d179c5d39808e2d3701a1d9793 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB 2024-11-22T18:54:09,451 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/452b6c39b22643aaad16b79ea6724845 is 1080, key is row0090/info:/1732301647432/Put/seqid=0 2024-11-22T18:54:09,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741856_1032 (size=13587) 2024-11-22T18:54:09,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741856_1032 (size=13587) 2024-11-22T18:54:09,457 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/452b6c39b22643aaad16b79ea6724845 2024-11-22T18:54:09,463 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/452b6c39b22643aaad16b79ea6724845 as 
hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/452b6c39b22643aaad16b79ea6724845 2024-11-22T18:54:09,468 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/452b6c39b22643aaad16b79ea6724845, entries=8, sequenceid=129, filesize=13.3 K 2024-11-22T18:54:09,469 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=11.56 KB/11836 for eb2200d179c5d39808e2d3701a1d9793 in 22ms, sequenceid=129, compaction requested=false 2024-11-22T18:54:09,469 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for eb2200d179c5d39808e2d3701a1d9793: 2024-11-22T18:54:09,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39949 {}] regionserver.HRegion(8855): Flush requested on eb2200d179c5d39808e2d3701a1d9793 2024-11-22T18:54:09,469 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing eb2200d179c5d39808e2d3701a1d9793 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-22T18:54:09,476 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/70008b0af8fe49af82c75e4a2f60f0a9 is 1080, key is row0098/info:/1732301649448/Put/seqid=0 2024-11-22T18:54:09,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741857_1033 (size=17906) 2024-11-22T18:54:09,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741857_1033 (size=17906) 2024-11-22T18:54:09,485 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=144 (bloomFilter=true), to=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/70008b0af8fe49af82c75e4a2f60f0a9 2024-11-22T18:54:09,491 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/70008b0af8fe49af82c75e4a2f60f0a9 as hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/70008b0af8fe49af82c75e4a2f60f0a9 2024-11-22T18:54:09,497 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/70008b0af8fe49af82c75e4a2f60f0a9, entries=12, sequenceid=144, filesize=17.5 K 2024-11-22T18:54:09,498 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=14.71 KB/15064 for eb2200d179c5d39808e2d3701a1d9793 in 29ms, sequenceid=144, compaction requested=true 2024-11-22T18:54:09,498 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for eb2200d179c5d39808e2d3701a1d9793: 2024-11-22T18:54:09,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eb2200d179c5d39808e2d3701a1d9793:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T18:54:09,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T18:54:09,499 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T18:54:09,500 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 66842 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T18:54:09,500 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HStore(1541): eb2200d179c5d39808e2d3701a1d9793/info is initiating minor compaction (all files) 2024-11-22T18:54:09,500 INFO [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of eb2200d179c5d39808e2d3701a1d9793/info in TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793. 2024-11-22T18:54:09,500 INFO [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/8c14311808fe425986154cfbd713ca2b, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/452b6c39b22643aaad16b79ea6724845, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/70008b0af8fe49af82c75e4a2f60f0a9] into tmpdir=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp, totalSize=65.3 K 2024-11-22T18:54:09,501 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8c14311808fe425986154cfbd713ca2b, keycount=28, bloomtype=ROW, size=34.5 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732301635270 2024-11-22T18:54:09,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39949 {}] regionserver.HRegion(8855): Flush requested on eb2200d179c5d39808e2d3701a1d9793 2024-11-22T18:54:09,501 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing eb2200d179c5d39808e2d3701a1d9793 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-11-22T18:54:09,501 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] compactions.Compactor(225): Compacting 452b6c39b22643aaad16b79ea6724845, keycount=8, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1732301647432 2024-11-22T18:54:09,502 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] compactions.Compactor(225): Compacting 70008b0af8fe49af82c75e4a2f60f0a9, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=144, earliestPutTs=1732301649448 2024-11-22T18:54:09,507 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/cde59477ed7e455aa43b4b355dab9b31 is 1080, key is row0110/info:/1732301649471/Put/seqid=0 2024-11-22T18:54:09,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741858_1034 (size=22238) 2024-11-22T18:54:09,515 INFO [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): eb2200d179c5d39808e2d3701a1d9793#info#compaction#73 average throughput is 49.26 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T18:54:09,516 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/fac58280b5d842d880152f8e3c2ce761 is 1080, key is row0062/info:/1732301635270/Put/seqid=0 2024-11-22T18:54:09,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741858_1034 (size=22238) 2024-11-22T18:54:09,517 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=163 (bloomFilter=true), to=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/cde59477ed7e455aa43b4b355dab9b31 2024-11-22T18:54:09,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741859_1035 (size=57012) 2024-11-22T18:54:09,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741859_1035 (size=57012) 2024-11-22T18:54:09,524 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/cde59477ed7e455aa43b4b355dab9b31 as hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/cde59477ed7e455aa43b4b355dab9b31 2024-11-22T18:54:09,552 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/fac58280b5d842d880152f8e3c2ce761 as hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/fac58280b5d842d880152f8e3c2ce761 2024-11-22T18:54:09,553 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/cde59477ed7e455aa43b4b355dab9b31, entries=16, sequenceid=163, filesize=21.7 K 2024-11-22T18:54:09,555 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=3.15 KB/3228 for eb2200d179c5d39808e2d3701a1d9793 in 53ms, sequenceid=163, compaction requested=false 2024-11-22T18:54:09,555 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for eb2200d179c5d39808e2d3701a1d9793: 2024-11-22T18:54:09,560 INFO [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in eb2200d179c5d39808e2d3701a1d9793/info of eb2200d179c5d39808e2d3701a1d9793 into fac58280b5d842d880152f8e3c2ce761(size=55.7 K), total size for store is 77.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T18:54:09,560 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for eb2200d179c5d39808e2d3701a1d9793: 2024-11-22T18:54:09,560 INFO [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793., storeName=eb2200d179c5d39808e2d3701a1d9793/info, priority=13, startTime=1732301649499; duration=0sec 2024-11-22T18:54:09,561 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T18:54:09,561 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eb2200d179c5d39808e2d3701a1d9793:info 2024-11-22T18:54:10,432 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:10,432 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:11,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:11,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:11,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39949 {}] regionserver.HRegion(8855): Flush requested on eb2200d179c5d39808e2d3701a1d9793 2024-11-22T18:54:11,516 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing eb2200d179c5d39808e2d3701a1d9793 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T18:54:11,521 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/fcaeeefe7e974664be56a58547950174 is 1080, key is row0126/info:/1732301649502/Put/seqid=0 2024-11-22T18:54:11,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741860_1036 (size=12516) 2024-11-22T18:54:11,527 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/fcaeeefe7e974664be56a58547950174 2024-11-22T18:54:11,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741860_1036 (size=12516) 2024-11-22T18:54:11,533 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/fcaeeefe7e974664be56a58547950174 as hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/fcaeeefe7e974664be56a58547950174 2024-11-22T18:54:11,538 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/fcaeeefe7e974664be56a58547950174, entries=7, sequenceid=174, filesize=12.2 K 2024-11-22T18:54:11,539 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=12.61 KB/12912 for eb2200d179c5d39808e2d3701a1d9793 in 23ms, sequenceid=174, compaction requested=true 2024-11-22T18:54:11,540 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for eb2200d179c5d39808e2d3701a1d9793: 2024-11-22T18:54:11,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39949 {}] regionserver.HRegion(8855): Flush requested on eb2200d179c5d39808e2d3701a1d9793 2024-11-22T18:54:11,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eb2200d179c5d39808e2d3701a1d9793:info, priority=-2147483648, current under 
compaction store size is 1 2024-11-22T18:54:11,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T18:54:11,540 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T18:54:11,540 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing eb2200d179c5d39808e2d3701a1d9793 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-22T18:54:11,541 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 91766 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T18:54:11,541 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HStore(1541): eb2200d179c5d39808e2d3701a1d9793/info is initiating minor compaction (all files) 2024-11-22T18:54:11,541 INFO [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of eb2200d179c5d39808e2d3701a1d9793/info in TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793. 2024-11-22T18:54:11,541 INFO [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/fac58280b5d842d880152f8e3c2ce761, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/cde59477ed7e455aa43b4b355dab9b31, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/fcaeeefe7e974664be56a58547950174] into tmpdir=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp, totalSize=89.6 K 2024-11-22T18:54:11,542 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] compactions.Compactor(225): Compacting fac58280b5d842d880152f8e3c2ce761, keycount=48, bloomtype=ROW, size=55.7 K, encoding=NONE, compression=NONE, seqNum=144, earliestPutTs=1732301635270 2024-11-22T18:54:11,542 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] compactions.Compactor(225): Compacting cde59477ed7e455aa43b4b355dab9b31, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1732301649471 2024-11-22T18:54:11,543 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] compactions.Compactor(225): Compacting fcaeeefe7e974664be56a58547950174, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732301649502 2024-11-22T18:54:11,545 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/7af1c79ac2da4d1fb087e27df9a6b33c is 1080, key is row0133/info:/1732301651517/Put/seqid=0 2024-11-22T18:54:11,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to 
blk_1073741861_1037 (size=19000) 2024-11-22T18:54:11,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741861_1037 (size=19000) 2024-11-22T18:54:11,562 INFO [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): eb2200d179c5d39808e2d3701a1d9793#info#compaction#76 average throughput is 36.43 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T18:54:11,563 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/a081c0cf14d3498fac6d9d46bd8af609 is 1080, key is row0062/info:/1732301635270/Put/seqid=0 2024-11-22T18:54:11,563 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=190 (bloomFilter=true), to=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/7af1c79ac2da4d1fb087e27df9a6b33c 2024-11-22T18:54:11,569 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/7af1c79ac2da4d1fb087e27df9a6b33c as hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/7af1c79ac2da4d1fb087e27df9a6b33c 2024-11-22T18:54:11,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741862_1038 (size=82045) 2024-11-22T18:54:11,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741862_1038 (size=82045) 2024-11-22T18:54:11,575 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/7af1c79ac2da4d1fb087e27df9a6b33c, entries=13, sequenceid=190, filesize=18.6 K 2024-11-22T18:54:11,576 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=15.76 KB/16140 for eb2200d179c5d39808e2d3701a1d9793 in 36ms, sequenceid=190, compaction requested=false 2024-11-22T18:54:11,576 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for eb2200d179c5d39808e2d3701a1d9793: 2024-11-22T18:54:11,578 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/a081c0cf14d3498fac6d9d46bd8af609 as hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/a081c0cf14d3498fac6d9d46bd8af609 2024-11-22T18:54:11,583 INFO [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction 
of 3 (all) file(s) in eb2200d179c5d39808e2d3701a1d9793/info of eb2200d179c5d39808e2d3701a1d9793 into a081c0cf14d3498fac6d9d46bd8af609(size=80.1 K), total size for store is 98.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T18:54:11,583 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for eb2200d179c5d39808e2d3701a1d9793: 2024-11-22T18:54:11,583 INFO [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793., storeName=eb2200d179c5d39808e2d3701a1d9793/info, priority=13, startTime=1732301651540; duration=0sec 2024-11-22T18:54:11,583 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T18:54:11,583 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eb2200d179c5d39808e2d3701a1d9793:info 2024-11-22T18:54:12,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T18:54:12,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:12,867 INFO [master/d79ba0c344fb:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-22T18:54:12,867 INFO [master/d79ba0c344fb:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-22T18:54:13,434 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:13,434 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:13,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39949 {}] regionserver.HRegion(8855): Flush requested on eb2200d179c5d39808e2d3701a1d9793 2024-11-22T18:54:13,569 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing eb2200d179c5d39808e2d3701a1d9793 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-11-22T18:54:13,573 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/46f1bd73358641de89d17f915e7b5548 is 1080, key is row0146/info:/1732301651541/Put/seqid=0 2024-11-22T18:54:13,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741863_1039 (size=22238) 2024-11-22T18:54:13,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741863_1039 (size=22238) 2024-11-22T18:54:13,579 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/46f1bd73358641de89d17f915e7b5548 2024-11-22T18:54:13,585 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/46f1bd73358641de89d17f915e7b5548 as hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/46f1bd73358641de89d17f915e7b5548 2024-11-22T18:54:13,590 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/46f1bd73358641de89d17f915e7b5548, entries=16, sequenceid=210, filesize=21.7 K 2024-11-22T18:54:13,591 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=13.66 KB/13988 for eb2200d179c5d39808e2d3701a1d9793 in 22ms, sequenceid=210, compaction requested=true 2024-11-22T18:54:13,591 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for eb2200d179c5d39808e2d3701a1d9793: 2024-11-22T18:54:13,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eb2200d179c5d39808e2d3701a1d9793:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T18:54:13,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: 
MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T18:54:13,591 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T18:54:13,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39949 {}] regionserver.HRegion(8855): Flush requested on eb2200d179c5d39808e2d3701a1d9793 2024-11-22T18:54:13,592 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing eb2200d179c5d39808e2d3701a1d9793 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-22T18:54:13,592 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 123283 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T18:54:13,592 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HStore(1541): eb2200d179c5d39808e2d3701a1d9793/info is initiating minor compaction (all files) 2024-11-22T18:54:13,592 INFO [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of eb2200d179c5d39808e2d3701a1d9793/info in TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793. 2024-11-22T18:54:13,592 INFO [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/a081c0cf14d3498fac6d9d46bd8af609, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/7af1c79ac2da4d1fb087e27df9a6b33c, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/46f1bd73358641de89d17f915e7b5548] into tmpdir=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp, totalSize=120.4 K 2024-11-22T18:54:13,593 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] compactions.Compactor(225): Compacting a081c0cf14d3498fac6d9d46bd8af609, keycount=71, bloomtype=ROW, size=80.1 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732301635270 2024-11-22T18:54:13,593 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7af1c79ac2da4d1fb087e27df9a6b33c, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=190, earliestPutTs=1732301651517 2024-11-22T18:54:13,594 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] compactions.Compactor(225): Compacting 46f1bd73358641de89d17f915e7b5548, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1732301651541 2024-11-22T18:54:13,596 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/8f24774e2e244fdc971572f214d11cd9 is 1080, key is row0162/info:/1732301653570/Put/seqid=0 2024-11-22T18:54:13,607 INFO [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
eb2200d179c5d39808e2d3701a1d9793#info#compaction#79 average throughput is 51.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T18:54:13,608 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/8974cc4e2a2c490287c28679c17fca02 is 1080, key is row0062/info:/1732301635270/Put/seqid=0 2024-11-22T18:54:13,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741865_1041 (size=113417) 2024-11-22T18:54:13,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741865_1041 (size=113417) 2024-11-22T18:54:13,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741864_1040 (size=20078) 2024-11-22T18:54:13,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741864_1040 (size=20078) 2024-11-22T18:54:13,618 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=227 (bloomFilter=true), to=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/8f24774e2e244fdc971572f214d11cd9 2024-11-22T18:54:13,619 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39949 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=eb2200d179c5d39808e2d3701a1d9793, server=d79ba0c344fb,39949,1732301611934 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-22T18:54:13,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39949 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:38364 deadline: 1732301663619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=eb2200d179c5d39808e2d3701a1d9793, server=d79ba0c344fb,39949,1732301611934 2024-11-22T18:54:13,620 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793., hostname=d79ba0c344fb,39949,1732301611934, seqNum=86 , the old value is region=TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793., hostname=d79ba0c344fb,39949,1732301611934, seqNum=86, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=eb2200d179c5d39808e2d3701a1d9793, server=d79ba0c344fb,39949,1732301611934 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T18:54:13,620 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793., hostname=d79ba0c344fb,39949,1732301611934, seqNum=86 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=eb2200d179c5d39808e2d3701a1d9793, server=d79ba0c344fb,39949,1732301611934 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T18:54:13,620 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793., hostname=d79ba0c344fb,39949,1732301611934, seqNum=86 because the exception is null or not the one we care about 2024-11-22T18:54:13,622 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/8974cc4e2a2c490287c28679c17fca02 as hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/8974cc4e2a2c490287c28679c17fca02 2024-11-22T18:54:13,624 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/8f24774e2e244fdc971572f214d11cd9 as hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/8f24774e2e244fdc971572f214d11cd9 2024-11-22T18:54:13,628 INFO [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in eb2200d179c5d39808e2d3701a1d9793/info of eb2200d179c5d39808e2d3701a1d9793 into 8974cc4e2a2c490287c28679c17fca02(size=110.8 K), total size for store is 110.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
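The RegionTooBusyException recorded just above is the region server refusing a write because the memstore for eb2200d179c5d39808e2d3701a1d9793 has gone over its blocking limit (32.0 K in this test configuration) while the flushes and compactions logged around it catch up; the AsyncRegionLocatorHelper entries show the client keeping its cached region location and simply retrying, since this error does not mean the region has moved. The following is a minimal, hypothetical client-side sketch of handling that condition with the standard HBase Table API, not part of the test above: the table name and row key are taken from the log, while the retry count and backoff values are illustrative assumptions, and in practice the HBase client retries this exception internally before it ever reaches application code.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {

  // Illustrative retry settings (assumptions); the client in the log relies on its built-in retries.
  private static final int MAX_ATTEMPTS = 5;
  private static final long INITIAL_BACKOFF_MS = 100L;

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestLogRolling-testLogRolling"))) {
      // Row key and column family mirror the entries seen in the log (info:/row0162).
      Put put = new Put(Bytes.toBytes("row0162"))
          .addColumn(Bytes.toBytes("info"), new byte[0], Bytes.toBytes("value"));
      putWithBackoff(table, put);
    }
  }

  // Retries a put if RegionTooBusyException surfaces to the caller (memstore over its blocking
  // limit), backing off so the region server has time to flush and compact, as in the log above.
  static void putWithBackoff(Table table, Put put) throws IOException, InterruptedException {
    long backoff = INITIAL_BACKOFF_MS;
    for (int attempt = 1; ; attempt++) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException busy) {
        if (attempt >= MAX_ATTEMPTS) {
          throw busy;            // give up after MAX_ATTEMPTS tries
        }
        Thread.sleep(backoff);   // wait for a flush to drain the memstore
        backoff *= 2;            // simple exponential backoff
      }
    }
  }
}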
2024-11-22T18:54:13,628 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for eb2200d179c5d39808e2d3701a1d9793: 2024-11-22T18:54:13,628 INFO [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793., storeName=eb2200d179c5d39808e2d3701a1d9793/info, priority=13, startTime=1732301653591; duration=0sec 2024-11-22T18:54:13,629 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T18:54:13,629 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eb2200d179c5d39808e2d3701a1d9793:info 2024-11-22T18:54:13,629 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/8f24774e2e244fdc971572f214d11cd9, entries=14, sequenceid=227, filesize=19.6 K 2024-11-22T18:54:13,630 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=15.76 KB/16140 for eb2200d179c5d39808e2d3701a1d9793 in 38ms, sequenceid=227, compaction requested=false 2024-11-22T18:54:13,630 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for eb2200d179c5d39808e2d3701a1d9793: 2024-11-22T18:54:14,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:14,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:15,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:15,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:16,436 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:16,436 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:17,437 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:17,437 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:17,809 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20375 2024-11-22T18:54:18,437 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:18,437 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:19,438 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:19,438 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:20,438 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:20,438 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:21,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:21,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:22,440 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:22,440 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:23,440 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:23,440 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T18:54:23,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39949 {}] regionserver.HRegion(8855): Flush requested on eb2200d179c5d39808e2d3701a1d9793 2024-11-22T18:54:23,679 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing eb2200d179c5d39808e2d3701a1d9793 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-11-22T18:54:23,684 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/130c4697b80a4e688c88ad864ced952a is 1080, key is row0176/info:/1732301653593/Put/seqid=0 2024-11-22T18:54:23,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741866_1042 (size=22238) 2024-11-22T18:54:23,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741866_1042 (size=22238) 2024-11-22T18:54:23,689 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/130c4697b80a4e688c88ad864ced952a 2024-11-22T18:54:23,695 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/130c4697b80a4e688c88ad864ced952a as hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/130c4697b80a4e688c88ad864ced952a 2024-11-22T18:54:23,700 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/130c4697b80a4e688c88ad864ced952a, entries=16, sequenceid=247, filesize=21.7 K 2024-11-22T18:54:23,701 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=1.05 KB/1076 for eb2200d179c5d39808e2d3701a1d9793 in 22ms, sequenceid=247, compaction requested=true 2024-11-22T18:54:23,702 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for eb2200d179c5d39808e2d3701a1d9793: 2024-11-22T18:54:23,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eb2200d179c5d39808e2d3701a1d9793:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T18:54:23,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T18:54:23,702 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T18:54:23,703 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 155733 starting at candidate #0 after 
considering 1 permutations with 1 in ratio 2024-11-22T18:54:23,703 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HStore(1541): eb2200d179c5d39808e2d3701a1d9793/info is initiating minor compaction (all files) 2024-11-22T18:54:23,703 INFO [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of eb2200d179c5d39808e2d3701a1d9793/info in TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793. 2024-11-22T18:54:23,703 INFO [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/8974cc4e2a2c490287c28679c17fca02, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/8f24774e2e244fdc971572f214d11cd9, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/130c4697b80a4e688c88ad864ced952a] into tmpdir=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp, totalSize=152.1 K 2024-11-22T18:54:23,703 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8974cc4e2a2c490287c28679c17fca02, keycount=100, bloomtype=ROW, size=110.8 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1732301635270 2024-11-22T18:54:23,704 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8f24774e2e244fdc971572f214d11cd9, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=227, earliestPutTs=1732301653570 2024-11-22T18:54:23,704 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] compactions.Compactor(225): Compacting 130c4697b80a4e688c88ad864ced952a, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732301653593 2024-11-22T18:54:23,715 INFO [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): eb2200d179c5d39808e2d3701a1d9793#info#compaction#81 average throughput is 66.70 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T18:54:23,716 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/0dbb5e486b424ae494f06e3d62b30a06 is 1080, key is row0062/info:/1732301635270/Put/seqid=0 2024-11-22T18:54:23,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741867_1043 (size=146064) 2024-11-22T18:54:23,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741867_1043 (size=146064) 2024-11-22T18:54:23,727 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/0dbb5e486b424ae494f06e3d62b30a06 as hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/0dbb5e486b424ae494f06e3d62b30a06 2024-11-22T18:54:23,732 INFO [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in eb2200d179c5d39808e2d3701a1d9793/info of eb2200d179c5d39808e2d3701a1d9793 into 0dbb5e486b424ae494f06e3d62b30a06(size=142.6 K), total size for store is 142.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T18:54:23,732 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for eb2200d179c5d39808e2d3701a1d9793: 2024-11-22T18:54:23,733 INFO [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793., storeName=eb2200d179c5d39808e2d3701a1d9793/info, priority=13, startTime=1732301663702; duration=0sec 2024-11-22T18:54:23,733 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T18:54:23,733 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eb2200d179c5d39808e2d3701a1d9793:info 2024-11-22T18:54:24,441 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:24,441 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:25,442 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:25,442 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:25,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39949 {}] regionserver.HRegion(8855): Flush requested on eb2200d179c5d39808e2d3701a1d9793 2024-11-22T18:54:25,692 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing eb2200d179c5d39808e2d3701a1d9793 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T18:54:25,696 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/e3baf80aee9240b3896930ad3532ad38 is 1080, key is row0192/info:/1732301663680/Put/seqid=0 2024-11-22T18:54:25,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741868_1044 (size=12518) 2024-11-22T18:54:25,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741868_1044 (size=12518) 2024-11-22T18:54:25,702 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=258 (bloomFilter=true), to=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/e3baf80aee9240b3896930ad3532ad38 2024-11-22T18:54:25,708 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/e3baf80aee9240b3896930ad3532ad38 as 
hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/e3baf80aee9240b3896930ad3532ad38 2024-11-22T18:54:25,713 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/e3baf80aee9240b3896930ad3532ad38, entries=7, sequenceid=258, filesize=12.2 K 2024-11-22T18:54:25,714 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for eb2200d179c5d39808e2d3701a1d9793 in 22ms, sequenceid=258, compaction requested=false 2024-11-22T18:54:25,714 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for eb2200d179c5d39808e2d3701a1d9793: 2024-11-22T18:54:25,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39949 {}] regionserver.HRegion(8855): Flush requested on eb2200d179c5d39808e2d3701a1d9793 2024-11-22T18:54:25,714 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing eb2200d179c5d39808e2d3701a1d9793 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-22T18:54:25,719 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/6e35238561e64bfd97d8b9910797cc2b is 1080, key is row0199/info:/1732301665693/Put/seqid=0 2024-11-22T18:54:25,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741869_1045 (size=20092) 2024-11-22T18:54:25,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741869_1045 (size=20092) 2024-11-22T18:54:25,725 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=275 (bloomFilter=true), to=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/6e35238561e64bfd97d8b9910797cc2b 2024-11-22T18:54:25,730 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/6e35238561e64bfd97d8b9910797cc2b as hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/6e35238561e64bfd97d8b9910797cc2b 2024-11-22T18:54:25,736 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/6e35238561e64bfd97d8b9910797cc2b, entries=14, sequenceid=275, filesize=19.6 K 2024-11-22T18:54:25,737 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=12.61 KB/12912 for eb2200d179c5d39808e2d3701a1d9793 in 22ms, sequenceid=275, compaction requested=true 2024-11-22T18:54:25,737 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for eb2200d179c5d39808e2d3701a1d9793: 2024-11-22T18:54:25,737 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eb2200d179c5d39808e2d3701a1d9793:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T18:54:25,737 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T18:54:25,737 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T18:54:25,738 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 178674 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T18:54:25,738 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HStore(1541): eb2200d179c5d39808e2d3701a1d9793/info is initiating minor compaction (all files) 2024-11-22T18:54:25,738 INFO [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of eb2200d179c5d39808e2d3701a1d9793/info in TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793. 2024-11-22T18:54:25,738 INFO [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/0dbb5e486b424ae494f06e3d62b30a06, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/e3baf80aee9240b3896930ad3532ad38, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/6e35238561e64bfd97d8b9910797cc2b] into tmpdir=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp, totalSize=174.5 K 2024-11-22T18:54:25,739 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0dbb5e486b424ae494f06e3d62b30a06, keycount=130, bloomtype=ROW, size=142.6 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732301635270 2024-11-22T18:54:25,739 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] compactions.Compactor(225): Compacting e3baf80aee9240b3896930ad3532ad38, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1732301663680 2024-11-22T18:54:25,739 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6e35238561e64bfd97d8b9910797cc2b, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1732301665693 2024-11-22T18:54:25,752 INFO [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): eb2200d179c5d39808e2d3701a1d9793#info#compaction#84 average throughput is 51.65 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T18:54:25,752 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/3464554df1344e52a62d3fd286730e2d is 1080, key is row0062/info:/1732301635270/Put/seqid=0 2024-11-22T18:54:25,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741870_1046 (size=168824) 2024-11-22T18:54:25,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741870_1046 (size=168824) 2024-11-22T18:54:25,770 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/3464554df1344e52a62d3fd286730e2d as hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/3464554df1344e52a62d3fd286730e2d 2024-11-22T18:54:25,776 INFO [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in eb2200d179c5d39808e2d3701a1d9793/info of eb2200d179c5d39808e2d3701a1d9793 into 3464554df1344e52a62d3fd286730e2d(size=164.9 K), total size for store is 164.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T18:54:25,776 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for eb2200d179c5d39808e2d3701a1d9793: 2024-11-22T18:54:25,776 INFO [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793., storeName=eb2200d179c5d39808e2d3701a1d9793/info, priority=13, startTime=1732301665737; duration=0sec 2024-11-22T18:54:25,776 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T18:54:25,776 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eb2200d179c5d39808e2d3701a1d9793:info 2024-11-22T18:54:26,442 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:26,442 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:27,443 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:27,443 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:27,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39949 {}] regionserver.HRegion(8855): Flush requested on eb2200d179c5d39808e2d3701a1d9793 2024-11-22T18:54:27,737 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing eb2200d179c5d39808e2d3701a1d9793 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-22T18:54:27,742 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/e795c2454cda4a778665bd7f8f274db5 is 1080, key is row0213/info:/1732301665715/Put/seqid=0 2024-11-22T18:54:27,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741871_1047 (size=19013) 2024-11-22T18:54:27,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741871_1047 (size=19013) 2024-11-22T18:54:27,749 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/e795c2454cda4a778665bd7f8f274db5 2024-11-22T18:54:27,755 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/e795c2454cda4a778665bd7f8f274db5 as 
hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/e795c2454cda4a778665bd7f8f274db5 2024-11-22T18:54:27,761 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/e795c2454cda4a778665bd7f8f274db5, entries=13, sequenceid=292, filesize=18.6 K 2024-11-22T18:54:27,761 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=14.71 KB/15064 for eb2200d179c5d39808e2d3701a1d9793 in 24ms, sequenceid=292, compaction requested=false 2024-11-22T18:54:27,762 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for eb2200d179c5d39808e2d3701a1d9793: 2024-11-22T18:54:27,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39949 {}] regionserver.HRegion(8855): Flush requested on eb2200d179c5d39808e2d3701a1d9793 2024-11-22T18:54:27,763 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing eb2200d179c5d39808e2d3701a1d9793 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-22T18:54:27,767 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/12eb4ff3b83d437fb031f21c1a609456 is 1080, key is row0226/info:/1732301667739/Put/seqid=0 2024-11-22T18:54:27,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741872_1048 (size=21171) 2024-11-22T18:54:27,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741872_1048 (size=21171) 2024-11-22T18:54:27,777 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=310 (bloomFilter=true), to=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/12eb4ff3b83d437fb031f21c1a609456 2024-11-22T18:54:27,783 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/12eb4ff3b83d437fb031f21c1a609456 as hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/12eb4ff3b83d437fb031f21c1a609456 2024-11-22T18:54:27,787 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39949 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=eb2200d179c5d39808e2d3701a1d9793, server=d79ba0c344fb,39949,1732301611934 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-22T18:54:27,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39949 {}] ipc.CallRunner(138): callId: 265 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:38364 deadline: 1732301677787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=eb2200d179c5d39808e2d3701a1d9793, server=d79ba0c344fb,39949,1732301611934 2024-11-22T18:54:27,788 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793., hostname=d79ba0c344fb,39949,1732301611934, seqNum=86 , the old value is region=TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793., hostname=d79ba0c344fb,39949,1732301611934, seqNum=86, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=eb2200d179c5d39808e2d3701a1d9793, server=d79ba0c344fb,39949,1732301611934 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T18:54:27,788 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/12eb4ff3b83d437fb031f21c1a609456, entries=15, sequenceid=310, filesize=20.7 K 2024-11-22T18:54:27,788 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793., 
hostname=d79ba0c344fb,39949,1732301611934, seqNum=86 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=eb2200d179c5d39808e2d3701a1d9793, server=d79ba0c344fb,39949,1732301611934 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T18:54:27,788 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793., hostname=d79ba0c344fb,39949,1732301611934, seqNum=86 because the exception is null or not the one we care about 2024-11-22T18:54:27,789 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=14.71 KB/15064 for eb2200d179c5d39808e2d3701a1d9793 in 27ms, sequenceid=310, compaction requested=true 2024-11-22T18:54:27,789 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for eb2200d179c5d39808e2d3701a1d9793: 2024-11-22T18:54:27,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eb2200d179c5d39808e2d3701a1d9793:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T18:54:27,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T18:54:27,789 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T18:54:27,790 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 209008 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T18:54:27,790 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HStore(1541): eb2200d179c5d39808e2d3701a1d9793/info is initiating minor compaction (all files) 2024-11-22T18:54:27,790 INFO [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of eb2200d179c5d39808e2d3701a1d9793/info in TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793. 
2024-11-22T18:54:27,790 INFO [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/3464554df1344e52a62d3fd286730e2d, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/e795c2454cda4a778665bd7f8f274db5, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/12eb4ff3b83d437fb031f21c1a609456] into tmpdir=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp, totalSize=204.1 K 2024-11-22T18:54:27,791 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3464554df1344e52a62d3fd286730e2d, keycount=151, bloomtype=ROW, size=164.9 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1732301635270 2024-11-22T18:54:27,791 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] compactions.Compactor(225): Compacting e795c2454cda4a778665bd7f8f274db5, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1732301665715 2024-11-22T18:54:27,791 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] compactions.Compactor(225): Compacting 12eb4ff3b83d437fb031f21c1a609456, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=310, earliestPutTs=1732301667739 2024-11-22T18:54:27,802 INFO [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): eb2200d179c5d39808e2d3701a1d9793#info#compaction#87 average throughput is 61.23 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T18:54:27,803 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/eec23af50cfd4aaf8e57170d337fcd88 is 1080, key is row0062/info:/1732301635270/Put/seqid=0 2024-11-22T18:54:27,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741873_1049 (size=199146) 2024-11-22T18:54:27,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741873_1049 (size=199146) 2024-11-22T18:54:27,812 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/eec23af50cfd4aaf8e57170d337fcd88 as hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/eec23af50cfd4aaf8e57170d337fcd88 2024-11-22T18:54:27,818 INFO [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in eb2200d179c5d39808e2d3701a1d9793/info of eb2200d179c5d39808e2d3701a1d9793 into eec23af50cfd4aaf8e57170d337fcd88(size=194.5 K), total size for store is 194.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T18:54:27,818 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for eb2200d179c5d39808e2d3701a1d9793: 2024-11-22T18:54:27,819 INFO [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793., storeName=eb2200d179c5d39808e2d3701a1d9793/info, priority=13, startTime=1732301667789; duration=0sec 2024-11-22T18:54:27,819 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T18:54:27,819 DEBUG [RS:0;d79ba0c344fb:39949-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eb2200d179c5d39808e2d3701a1d9793:info 2024-11-22T18:54:28,443 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:28,443 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:29,444 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:29,444 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:30,445 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:30,445 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:31,445 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:31,445 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:31,860 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-22T18:54:32,446 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:32,446 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:33,446 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:33,446 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T18:54:33,920 DEBUG [master/d79ba0c344fb:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-22T18:54:34,447 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:34,447 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:35,448 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:35,448 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:36,448 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:36,448 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:37,449 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:37,449 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:37,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39949 {}] regionserver.HRegion(8855): Flush requested on eb2200d179c5d39808e2d3701a1d9793 2024-11-22T18:54:37,808 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing eb2200d179c5d39808e2d3701a1d9793 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-22T18:54:37,813 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/285289645472489ba06b0cc885f67d4e is 1080, key is row0241/info:/1732301667763/Put/seqid=0 2024-11-22T18:54:37,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741874_1050 (size=21171) 2024-11-22T18:54:37,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741874_1050 (size=21171) 2024-11-22T18:54:37,819 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/285289645472489ba06b0cc885f67d4e 2024-11-22T18:54:37,826 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/285289645472489ba06b0cc885f67d4e as 
hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/285289645472489ba06b0cc885f67d4e 2024-11-22T18:54:37,841 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/285289645472489ba06b0cc885f67d4e, entries=15, sequenceid=329, filesize=20.7 K 2024-11-22T18:54:37,842 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=1.05 KB/1076 for eb2200d179c5d39808e2d3701a1d9793 in 34ms, sequenceid=329, compaction requested=false 2024-11-22T18:54:37,842 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for eb2200d179c5d39808e2d3701a1d9793: 2024-11-22T18:54:38,449 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T18:54:38,449 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:39,450 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:39,450 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T18:54:39,811 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-22T18:54:39,812 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor d79ba0c344fb%2C39949%2C1732301611934.1732301679812 2024-11-22T18:54:39,827 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:54:39,828 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:54:39,828 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:54:39,828 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:54:39,828 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:54:39,828 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/WALs/d79ba0c344fb,39949,1732301611934/d79ba0c344fb%2C39949%2C1732301611934.1732301612360 with entries=311, filesize=307.82 KB; new WAL /user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/WALs/d79ba0c344fb,39949,1732301611934/d79ba0c344fb%2C39949%2C1732301611934.1732301679812 2024-11-22T18:54:39,829 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45179:45179),(127.0.0.1/127.0.0.1:33757:33757)] 2024-11-22T18:54:39,829 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/WALs/d79ba0c344fb,39949,1732301611934/d79ba0c344fb%2C39949%2C1732301611934.1732301612360 is not closed yet, will try archiving it next time 2024-11-22T18:54:39,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741833_1009 (size=315213) 2024-11-22T18:54:39,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741833_1009 (size=315213) 2024-11-22T18:54:39,833 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing eb2200d179c5d39808e2d3701a1d9793 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-22T18:54:39,837 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/ed64821b38624303ab52c5c296bfea0e is 1080, key is row0256/info:/1732301677810/Put/seqid=0 2024-11-22T18:54:39,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741876_1052 (size=6035) 2024-11-22T18:54:39,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741876_1052 (size=6035) 2024-11-22T18:54:39,843 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=333 (bloomFilter=true), to=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/ed64821b38624303ab52c5c296bfea0e 2024-11-22T18:54:39,848 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/.tmp/info/ed64821b38624303ab52c5c296bfea0e as 
hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/ed64821b38624303ab52c5c296bfea0e 2024-11-22T18:54:39,853 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/ed64821b38624303ab52c5c296bfea0e, entries=1, sequenceid=333, filesize=5.9 K 2024-11-22T18:54:39,854 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for eb2200d179c5d39808e2d3701a1d9793 in 21ms, sequenceid=333, compaction requested=true 2024-11-22T18:54:39,854 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for eb2200d179c5d39808e2d3701a1d9793: 2024-11-22T18:54:39,854 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for d4a9db853f3c66542587444426dd218d: 2024-11-22T18:54:39,854 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=670 B heapSize=2.02 KB 2024-11-22T18:54:39,858 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/hbase/meta/1588230740/.tmp/info/c79c5301058342888f4df003c388dee9 is 186, key is TestLogRolling-testLogRolling,,1732301635328.d4a9db853f3c66542587444426dd218d./info:regioninfo/1732301636017/Put/seqid=0 2024-11-22T18:54:39,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741877_1053 (size=6153) 2024-11-22T18:54:39,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741877_1053 (size=6153) 2024-11-22T18:54:39,872 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=670 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/hbase/meta/1588230740/.tmp/info/c79c5301058342888f4df003c388dee9 2024-11-22T18:54:39,877 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/hbase/meta/1588230740/.tmp/info/c79c5301058342888f4df003c388dee9 as hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/hbase/meta/1588230740/info/c79c5301058342888f4df003c388dee9 2024-11-22T18:54:39,882 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/hbase/meta/1588230740/info/c79c5301058342888f4df003c388dee9, entries=5, sequenceid=21, filesize=6.0 K 2024-11-22T18:54:39,883 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~670 B/670, heapSize ~1.25 KB/1280, currentSize=0 B/0 for 1588230740 in 29ms, sequenceid=21, compaction requested=false 2024-11-22T18:54:39,883 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-22T18:54:39,883 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor d79ba0c344fb%2C39949%2C1732301611934.1732301679883 2024-11-22T18:54:39,888 INFO [sync.0 {}] 
wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:54:39,888 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:54:39,888 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:54:39,888 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:54:39,888 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:54:39,888 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/WALs/d79ba0c344fb,39949,1732301611934/d79ba0c344fb%2C39949%2C1732301611934.1732301679812 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/WALs/d79ba0c344fb,39949,1732301611934/d79ba0c344fb%2C39949%2C1732301611934.1732301679883 2024-11-22T18:54:39,889 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45179:45179),(127.0.0.1/127.0.0.1:33757:33757)] 2024-11-22T18:54:39,889 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/WALs/d79ba0c344fb,39949,1732301611934/d79ba0c344fb%2C39949%2C1732301611934.1732301679812 is not closed yet, will try archiving it next time 2024-11-22T18:54:39,889 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/WALs/d79ba0c344fb,39949,1732301611934/d79ba0c344fb%2C39949%2C1732301611934.1732301612360 to hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/oldWALs/d79ba0c344fb%2C39949%2C1732301611934.1732301612360 2024-11-22T18:54:39,890 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T18:54:39,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741875_1051 (size=731) 2024-11-22T18:54:39,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741875_1051 (size=731) 2024-11-22T18:54:39,891 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/WALs/d79ba0c344fb,39949,1732301611934/d79ba0c344fb%2C39949%2C1732301611934.1732301679812 to hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/oldWALs/d79ba0c344fb%2C39949%2C1732301611934.1732301679812 2024-11-22T18:54:39,990 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-22T18:54:39,990 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
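(For context on the entries just above: the "Rolled WAL ... with entries=..." lines and the "Flushing eb2200d179c5d39808e2d3701a1d9793 ... / Finished flush" lines correspond to operations that can also be driven through the public HBase Admin API. The snippet below is only an illustrative sketch, not the code of AbstractTestLogRolling; the table name and the host,port,startcode server name are placeholders copied from this log.)

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RollAndFlushSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Flush the table's memstores to new HFiles, as in the "Flushing ... / Finished flush" entries.
          admin.flush(TableName.valueOf("TestLogRolling-testLogRolling"));
          // Ask one region server to roll its write-ahead log, as in the "Rolled WAL ..." entries.
          admin.rollWALWriter(ServerName.valueOf("d79ba0c344fb,39949,1732301611934"));
        }
      }
    }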
2024-11-22T18:54:39,990 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T18:54:39,990 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T18:54:39,991 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T18:54:39,991 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
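(The call stack above originates in AbstractTestLogRolling.tearDown, which shuts the single-process cluster down through HBaseTestingUtil. As a rough illustration of that lifecycle, assuming only what the stack shows plus the conventional startMiniCluster counterpart, a JUnit 4 skeleton would look roughly like the sketch below; it is not the actual test class.)

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    public class MiniClusterLifecycleSketch {
      private final HBaseTestingUtil util = new HBaseTestingUtil();

      @Before
      public void setUp() throws Exception {
        // Brings up an in-process master, region server, ZooKeeper and mini-DFS.
        util.startMiniCluster();
      }

      @After
      public void tearDown() throws Exception {
        // Produces the "Shutting down minicluster" and connection-close entries seen above.
        util.shutdownMiniCluster();
      }

      @Test
      public void smoke() throws Exception {
        // test body omitted
      }
    }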
2024-11-22T18:54:39,991 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-22T18:54:39,991 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=39837571, stopped=false 2024-11-22T18:54:39,991 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=d79ba0c344fb,42005,1732301611881 2024-11-22T18:54:39,994 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42005-0x1014106cde60000, quorum=127.0.0.1:55954, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T18:54:39,994 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39949-0x1014106cde60001, quorum=127.0.0.1:55954, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T18:54:39,994 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42005-0x1014106cde60000, quorum=127.0.0.1:55954, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:54:39,994 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39949-0x1014106cde60001, quorum=127.0.0.1:55954, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:54:39,994 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T18:54:39,994 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-22T18:54:39,994 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T18:54:39,994 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T18:54:39,994 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'd79ba0c344fb,39949,1732301611934' ***** 2024-11-22T18:54:39,994 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-22T18:54:39,994 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39949-0x1014106cde60001, quorum=127.0.0.1:55954, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T18:54:39,994 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42005-0x1014106cde60000, quorum=127.0.0.1:55954, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T18:54:39,995 INFO [RS:0;d79ba0c344fb:39949 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-22T18:54:39,995 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-22T18:54:39,995 INFO [RS:0;d79ba0c344fb:39949 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-22T18:54:39,995 INFO [RS:0;d79ba0c344fb:39949 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-22T18:54:39,996 INFO [RS:0;d79ba0c344fb:39949 {}] regionserver.HRegionServer(3091): Received CLOSE for eb2200d179c5d39808e2d3701a1d9793 2024-11-22T18:54:39,996 INFO [RS:0;d79ba0c344fb:39949 {}] regionserver.HRegionServer(3091): Received CLOSE for d4a9db853f3c66542587444426dd218d 2024-11-22T18:54:39,996 INFO [RS:0;d79ba0c344fb:39949 {}] regionserver.HRegionServer(959): stopping server d79ba0c344fb,39949,1732301611934 2024-11-22T18:54:39,996 INFO [RS:0;d79ba0c344fb:39949 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T18:54:39,996 INFO [RS:0;d79ba0c344fb:39949 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;d79ba0c344fb:39949. 2024-11-22T18:54:39,996 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing eb2200d179c5d39808e2d3701a1d9793, disabling compactions & flushes 2024-11-22T18:54:39,996 INFO [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793. 2024-11-22T18:54:39,996 DEBUG [RS:0;d79ba0c344fb:39949 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T18:54:39,996 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793. 2024-11-22T18:54:39,996 DEBUG [RS:0;d79ba0c344fb:39949 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T18:54:39,996 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793. after waiting 0 ms 2024-11-22T18:54:39,996 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793. 
2024-11-22T18:54:39,996 INFO [RS:0;d79ba0c344fb:39949 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-22T18:54:39,996 INFO [RS:0;d79ba0c344fb:39949 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-22T18:54:39,996 INFO [RS:0;d79ba0c344fb:39949 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-22T18:54:39,996 INFO [RS:0;d79ba0c344fb:39949 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-22T18:54:39,996 INFO [RS:0;d79ba0c344fb:39949 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-22T18:54:39,996 DEBUG [RS:0;d79ba0c344fb:39949 {}] regionserver.HRegionServer(1325): Online Regions={eb2200d179c5d39808e2d3701a1d9793=TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793., d4a9db853f3c66542587444426dd218d=TestLogRolling-testLogRolling,,1732301635328.d4a9db853f3c66542587444426dd218d., 1588230740=hbase:meta,,1.1588230740} 2024-11-22T18:54:39,996 DEBUG [RS:0;d79ba0c344fb:39949 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, d4a9db853f3c66542587444426dd218d, eb2200d179c5d39808e2d3701a1d9793 2024-11-22T18:54:39,996 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T18:54:39,996 INFO [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T18:54:39,997 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T18:54:39,997 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T18:54:39,997 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T18:54:39,996 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/4c84add3851d4757b1771807dceb6fbf.c4f64eb1dac983aa8e376c1d005981a8->hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/info/4c84add3851d4757b1771807dceb6fbf-top, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/c1c6ca6e80694a4090eb0aae45a9eb56, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/4c49c4a8d0dd481ea2abf8e77746d507, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/8c14311808fe425986154cfbd713ca2b, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/7b003ec62e014d909409887ad666dd65, 
hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/452b6c39b22643aaad16b79ea6724845, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/fac58280b5d842d880152f8e3c2ce761, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/70008b0af8fe49af82c75e4a2f60f0a9, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/cde59477ed7e455aa43b4b355dab9b31, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/a081c0cf14d3498fac6d9d46bd8af609, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/fcaeeefe7e974664be56a58547950174, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/7af1c79ac2da4d1fb087e27df9a6b33c, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/8974cc4e2a2c490287c28679c17fca02, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/46f1bd73358641de89d17f915e7b5548, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/8f24774e2e244fdc971572f214d11cd9, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/0dbb5e486b424ae494f06e3d62b30a06, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/130c4697b80a4e688c88ad864ced952a, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/e3baf80aee9240b3896930ad3532ad38, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/3464554df1344e52a62d3fd286730e2d, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/6e35238561e64bfd97d8b9910797cc2b, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/e795c2454cda4a778665bd7f8f274db5, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/12eb4ff3b83d437fb031f21c1a609456] to archive 2024-11-22T18:54:39,998 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793.-1 {}] backup.HFileArchiver(360): Archiving 
compacted files. 2024-11-22T18:54:40,000 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/4c84add3851d4757b1771807dceb6fbf.c4f64eb1dac983aa8e376c1d005981a8 to hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/archive/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/4c84add3851d4757b1771807dceb6fbf.c4f64eb1dac983aa8e376c1d005981a8 2024-11-22T18:54:40,001 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/c1c6ca6e80694a4090eb0aae45a9eb56 to hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/archive/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/c1c6ca6e80694a4090eb0aae45a9eb56 2024-11-22T18:54:40,002 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-22T18:54:40,002 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T18:54:40,002 INFO [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T18:54:40,002 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732301679996Running coprocessor pre-close hooks at 1732301679996Disabling compacts and flushes for region at 1732301679996Disabling writes for close at 1732301679997 (+1 ms)Writing region close event to WAL at 1732301679998 (+1 ms)Running coprocessor post-close hooks at 1732301680002 (+4 ms)Closed at 1732301680002 2024-11-22T18:54:40,002 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/4c49c4a8d0dd481ea2abf8e77746d507 to hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/archive/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/4c49c4a8d0dd481ea2abf8e77746d507 2024-11-22T18:54:40,002 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-22T18:54:40,004 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/8c14311808fe425986154cfbd713ca2b to hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/archive/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/8c14311808fe425986154cfbd713ca2b 2024-11-22T18:54:40,005 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/7b003ec62e014d909409887ad666dd65 to hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/archive/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/7b003ec62e014d909409887ad666dd65 2024-11-22T18:54:40,006 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/452b6c39b22643aaad16b79ea6724845 to hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/archive/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/452b6c39b22643aaad16b79ea6724845 2024-11-22T18:54:40,007 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/fac58280b5d842d880152f8e3c2ce761 to hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/archive/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/fac58280b5d842d880152f8e3c2ce761 2024-11-22T18:54:40,009 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/70008b0af8fe49af82c75e4a2f60f0a9 to hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/archive/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/70008b0af8fe49af82c75e4a2f60f0a9 2024-11-22T18:54:40,010 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/cde59477ed7e455aa43b4b355dab9b31 to hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/archive/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/cde59477ed7e455aa43b4b355dab9b31 2024-11-22T18:54:40,011 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/a081c0cf14d3498fac6d9d46bd8af609 to hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/archive/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/a081c0cf14d3498fac6d9d46bd8af609 2024-11-22T18:54:40,012 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/fcaeeefe7e974664be56a58547950174 to hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/archive/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/fcaeeefe7e974664be56a58547950174 2024-11-22T18:54:40,013 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/7af1c79ac2da4d1fb087e27df9a6b33c to hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/archive/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/7af1c79ac2da4d1fb087e27df9a6b33c 2024-11-22T18:54:40,015 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/8974cc4e2a2c490287c28679c17fca02 to hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/archive/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/8974cc4e2a2c490287c28679c17fca02 2024-11-22T18:54:40,016 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/46f1bd73358641de89d17f915e7b5548 to hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/archive/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/46f1bd73358641de89d17f915e7b5548 2024-11-22T18:54:40,017 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/8f24774e2e244fdc971572f214d11cd9 to hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/archive/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/8f24774e2e244fdc971572f214d11cd9 2024-11-22T18:54:40,018 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/0dbb5e486b424ae494f06e3d62b30a06 to hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/archive/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/0dbb5e486b424ae494f06e3d62b30a06 2024-11-22T18:54:40,020 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/130c4697b80a4e688c88ad864ced952a to hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/archive/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/130c4697b80a4e688c88ad864ced952a 2024-11-22T18:54:40,021 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/e3baf80aee9240b3896930ad3532ad38 to hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/archive/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/e3baf80aee9240b3896930ad3532ad38 2024-11-22T18:54:40,022 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/3464554df1344e52a62d3fd286730e2d to hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/archive/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/3464554df1344e52a62d3fd286730e2d 2024-11-22T18:54:40,023 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/6e35238561e64bfd97d8b9910797cc2b to hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/archive/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/6e35238561e64bfd97d8b9910797cc2b 2024-11-22T18:54:40,024 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/e795c2454cda4a778665bd7f8f274db5 to 
hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/archive/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/e795c2454cda4a778665bd7f8f274db5 2024-11-22T18:54:40,026 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/12eb4ff3b83d437fb031f21c1a609456 to hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/archive/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/info/12eb4ff3b83d437fb031f21c1a609456 2024-11-22T18:54:40,026 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=d79ba0c344fb:42005 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-11-22T18:54:40,027 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [c1c6ca6e80694a4090eb0aae45a9eb56=8260, 4c49c4a8d0dd481ea2abf8e77746d507=12509, 8c14311808fe425986154cfbd713ca2b=35349, 7b003ec62e014d909409887ad666dd65=24376, 452b6c39b22643aaad16b79ea6724845=13587, fac58280b5d842d880152f8e3c2ce761=57012, 70008b0af8fe49af82c75e4a2f60f0a9=17906, cde59477ed7e455aa43b4b355dab9b31=22238, a081c0cf14d3498fac6d9d46bd8af609=82045, fcaeeefe7e974664be56a58547950174=12516, 7af1c79ac2da4d1fb087e27df9a6b33c=19000, 8974cc4e2a2c490287c28679c17fca02=113417, 46f1bd73358641de89d17f915e7b5548=22238, 8f24774e2e244fdc971572f214d11cd9=20078, 0dbb5e486b424ae494f06e3d62b30a06=146064, 130c4697b80a4e688c88ad864ced952a=22238, e3baf80aee9240b3896930ad3532ad38=12518, 3464554df1344e52a62d3fd286730e2d=168824, 6e35238561e64bfd97d8b9910797cc2b=20092, e795c2454cda4a778665bd7f8f274db5=19013, 12eb4ff3b83d437fb031f21c1a609456=21171] 2024-11-22T18:54:40,030 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/eb2200d179c5d39808e2d3701a1d9793/recovered.edits/336.seqid, newMaxSeqId=336, maxSeqId=85 2024-11-22T18:54:40,031 INFO [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793. 
2024-11-22T18:54:40,031 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for eb2200d179c5d39808e2d3701a1d9793: Waiting for close lock at 1732301679996Running coprocessor pre-close hooks at 1732301679996Disabling compacts and flushes for region at 1732301679996Disabling writes for close at 1732301679996Writing region close event to WAL at 1732301680027 (+31 ms)Running coprocessor post-close hooks at 1732301680031 (+4 ms)Closed at 1732301680031 2024-11-22T18:54:40,031 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1732301635328.eb2200d179c5d39808e2d3701a1d9793. 2024-11-22T18:54:40,031 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing d4a9db853f3c66542587444426dd218d, disabling compactions & flushes 2024-11-22T18:54:40,031 INFO [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732301635328.d4a9db853f3c66542587444426dd218d. 2024-11-22T18:54:40,031 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732301635328.d4a9db853f3c66542587444426dd218d. 2024-11-22T18:54:40,031 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732301635328.d4a9db853f3c66542587444426dd218d. after waiting 0 ms 2024-11-22T18:54:40,031 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732301635328.d4a9db853f3c66542587444426dd218d. 2024-11-22T18:54:40,032 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732301635328.d4a9db853f3c66542587444426dd218d.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/d4a9db853f3c66542587444426dd218d/info/4c84add3851d4757b1771807dceb6fbf.c4f64eb1dac983aa8e376c1d005981a8->hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/c4f64eb1dac983aa8e376c1d005981a8/info/4c84add3851d4757b1771807dceb6fbf-bottom] to archive 2024-11-22T18:54:40,033 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732301635328.d4a9db853f3c66542587444426dd218d.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-22T18:54:40,034 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732301635328.d4a9db853f3c66542587444426dd218d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/d4a9db853f3c66542587444426dd218d/info/4c84add3851d4757b1771807dceb6fbf.c4f64eb1dac983aa8e376c1d005981a8 to hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/archive/data/default/TestLogRolling-testLogRolling/d4a9db853f3c66542587444426dd218d/info/4c84add3851d4757b1771807dceb6fbf.c4f64eb1dac983aa8e376c1d005981a8 2024-11-22T18:54:40,034 WARN [StoreCloser-TestLogRolling-testLogRolling,,1732301635328.d4a9db853f3c66542587444426dd218d.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-22T18:54:40,037 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/data/default/TestLogRolling-testLogRolling/d4a9db853f3c66542587444426dd218d/recovered.edits/90.seqid, newMaxSeqId=90, maxSeqId=85 2024-11-22T18:54:40,038 INFO [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732301635328.d4a9db853f3c66542587444426dd218d. 2024-11-22T18:54:40,038 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for d4a9db853f3c66542587444426dd218d: Waiting for close lock at 1732301680031Running coprocessor pre-close hooks at 1732301680031Disabling compacts and flushes for region at 1732301680031Disabling writes for close at 1732301680031Writing region close event to WAL at 1732301680034 (+3 ms)Running coprocessor post-close hooks at 1732301680038 (+4 ms)Closed at 1732301680038 2024-11-22T18:54:40,038 DEBUG [RS_CLOSE_REGION-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1732301635328.d4a9db853f3c66542587444426dd218d. 2024-11-22T18:54:40,197 INFO [RS:0;d79ba0c344fb:39949 {}] regionserver.HRegionServer(976): stopping server d79ba0c344fb,39949,1732301611934; all regions closed. 
2024-11-22T18:54:40,197 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:54:40,197 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:54:40,197 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:54:40,197 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:54:40,197 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:54:40,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741834_1010 (size=8107) 2024-11-22T18:54:40,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741834_1010 (size=8107) 2024-11-22T18:54:40,202 DEBUG [RS:0;d79ba0c344fb:39949 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/oldWALs 2024-11-22T18:54:40,202 INFO [RS:0;d79ba0c344fb:39949 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog d79ba0c344fb%2C39949%2C1732301611934.meta:.meta(num 1732301612786) 2024-11-22T18:54:40,202 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:54:40,203 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:54:40,203 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:54:40,203 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:54:40,203 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:54:40,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741878_1054 (size=778) 2024-11-22T18:54:40,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741878_1054 (size=778) 2024-11-22T18:54:40,207 DEBUG [RS:0;d79ba0c344fb:39949 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/oldWALs 2024-11-22T18:54:40,207 INFO [RS:0;d79ba0c344fb:39949 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog d79ba0c344fb%2C39949%2C1732301611934:(num 1732301679883) 2024-11-22T18:54:40,207 DEBUG [RS:0;d79ba0c344fb:39949 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T18:54:40,207 INFO [RS:0;d79ba0c344fb:39949 {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T18:54:40,207 INFO [RS:0;d79ba0c344fb:39949 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T18:54:40,207 INFO [RS:0;d79ba0c344fb:39949 {}] hbase.ChoreService(370): Chore service for: regionserver/d79ba0c344fb:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-22T18:54:40,207 INFO [RS:0;d79ba0c344fb:39949 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T18:54:40,207 INFO [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-22T18:54:40,207 INFO [RS:0;d79ba0c344fb:39949 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39949 2024-11-22T18:54:40,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39949-0x1014106cde60001, quorum=127.0.0.1:55954, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/d79ba0c344fb,39949,1732301611934 2024-11-22T18:54:40,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42005-0x1014106cde60000, quorum=127.0.0.1:55954, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T18:54:40,210 INFO [RS:0;d79ba0c344fb:39949 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T18:54:40,210 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [d79ba0c344fb,39949,1732301611934] 2024-11-22T18:54:40,213 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/d79ba0c344fb,39949,1732301611934 already deleted, retry=false 2024-11-22T18:54:40,213 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; d79ba0c344fb,39949,1732301611934 expired; onlineServers=0 2024-11-22T18:54:40,213 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'd79ba0c344fb,42005,1732301611881' ***** 2024-11-22T18:54:40,213 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-22T18:54:40,213 INFO [M:0;d79ba0c344fb:42005 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T18:54:40,213 INFO [M:0;d79ba0c344fb:42005 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T18:54:40,213 DEBUG [M:0;d79ba0c344fb:42005 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-22T18:54:40,214 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-22T18:54:40,214 DEBUG [M:0;d79ba0c344fb:42005 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-22T18:54:40,214 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster-HFileCleaner.small.0-1732301612144 {}] cleaner.HFileCleaner(306): Exit Thread[master/d79ba0c344fb:0:becomeActiveMaster-HFileCleaner.small.0-1732301612144,5,FailOnTimeoutGroup] 2024-11-22T18:54:40,214 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster-HFileCleaner.large.0-1732301612141 {}] cleaner.HFileCleaner(306): Exit Thread[master/d79ba0c344fb:0:becomeActiveMaster-HFileCleaner.large.0-1732301612141,5,FailOnTimeoutGroup] 2024-11-22T18:54:40,214 INFO [M:0;d79ba0c344fb:42005 {}] hbase.ChoreService(370): Chore service for: master/d79ba0c344fb:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-22T18:54:40,214 INFO [M:0;d79ba0c344fb:42005 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T18:54:40,214 DEBUG [M:0;d79ba0c344fb:42005 {}] master.HMaster(1795): Stopping service threads 2024-11-22T18:54:40,214 INFO [M:0;d79ba0c344fb:42005 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-22T18:54:40,214 INFO [M:0;d79ba0c344fb:42005 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T18:54:40,214 INFO [M:0;d79ba0c344fb:42005 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-22T18:54:40,214 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-22T18:54:40,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42005-0x1014106cde60000, quorum=127.0.0.1:55954, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-22T18:54:40,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42005-0x1014106cde60000, quorum=127.0.0.1:55954, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:54:40,215 DEBUG [M:0;d79ba0c344fb:42005 {}] zookeeper.ZKUtil(347): master:42005-0x1014106cde60000, quorum=127.0.0.1:55954, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-22T18:54:40,215 WARN [M:0;d79ba0c344fb:42005 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-22T18:54:40,216 INFO [M:0;d79ba0c344fb:42005 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/.lastflushedseqids 2024-11-22T18:54:40,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741879_1055 (size=228) 2024-11-22T18:54:40,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741879_1055 (size=228) 2024-11-22T18:54:40,222 INFO [M:0;d79ba0c344fb:42005 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-22T18:54:40,222 INFO [M:0;d79ba0c344fb:42005 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-22T18:54:40,222 DEBUG [M:0;d79ba0c344fb:42005 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T18:54:40,223 INFO [M:0;d79ba0c344fb:42005 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T18:54:40,223 DEBUG [M:0;d79ba0c344fb:42005 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T18:54:40,223 DEBUG [M:0;d79ba0c344fb:42005 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T18:54:40,223 DEBUG [M:0;d79ba0c344fb:42005 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T18:54:40,223 INFO [M:0;d79ba0c344fb:42005 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.42 KB heapSize=63.35 KB 2024-11-22T18:54:40,224 INFO [regionserver/d79ba0c344fb:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T18:54:40,241 DEBUG [M:0;d79ba0c344fb:42005 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d221d2f13d894becb184e1ee63071aca is 82, key is hbase:meta,,1/info:regioninfo/1732301612829/Put/seqid=0 2024-11-22T18:54:40,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741880_1056 (size=5672) 2024-11-22T18:54:40,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741880_1056 (size=5672) 2024-11-22T18:54:40,246 INFO [M:0;d79ba0c344fb:42005 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d221d2f13d894becb184e1ee63071aca 2024-11-22T18:54:40,268 DEBUG [M:0;d79ba0c344fb:42005 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/64b726269e03410a99d394c05fdbd4c0 is 750, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732301613348/Put/seqid=0 2024-11-22T18:54:40,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741881_1057 (size=7090) 2024-11-22T18:54:40,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741881_1057 (size=7090) 2024-11-22T18:54:40,273 INFO [M:0;d79ba0c344fb:42005 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.81 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/64b726269e03410a99d394c05fdbd4c0 2024-11-22T18:54:40,278 INFO [M:0;d79ba0c344fb:42005 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 64b726269e03410a99d394c05fdbd4c0 2024-11-22T18:54:40,292 DEBUG [M:0;d79ba0c344fb:42005 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c9cb67cd7eda4e43ad0ff7854aa4ac6d is 69, key is d79ba0c344fb,39949,1732301611934/rs:state/1732301612193/Put/seqid=0 2024-11-22T18:54:40,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741882_1058 (size=5156) 2024-11-22T18:54:40,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741882_1058 (size=5156) 2024-11-22T18:54:40,297 INFO [M:0;d79ba0c344fb:42005 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c9cb67cd7eda4e43ad0ff7854aa4ac6d 2024-11-22T18:54:40,311 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39949-0x1014106cde60001, quorum=127.0.0.1:55954, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T18:54:40,311 INFO [RS:0;d79ba0c344fb:39949 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T18:54:40,311 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39949-0x1014106cde60001, quorum=127.0.0.1:55954, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T18:54:40,311 INFO [RS:0;d79ba0c344fb:39949 {}] regionserver.HRegionServer(1031): Exiting; stopping=d79ba0c344fb,39949,1732301611934; zookeeper connection closed. 2024-11-22T18:54:40,312 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2c0d3f0e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2c0d3f0e 2024-11-22T18:54:40,312 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-22T18:54:40,314 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=5, created chunk count=9, reused chunk count=71, reuseRatio=88.75% 2024-11-22T18:54:40,314 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-11-22T18:54:40,317 DEBUG [M:0;d79ba0c344fb:42005 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4433df1bd71743439e44e13ea69e99e0 is 52, key is load_balancer_on/state:d/1732301612965/Put/seqid=0 2024-11-22T18:54:40,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741883_1059 (size=5056) 2024-11-22T18:54:40,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741883_1059 (size=5056) 2024-11-22T18:54:40,323 INFO [M:0;d79ba0c344fb:42005 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), 
to=hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4433df1bd71743439e44e13ea69e99e0 2024-11-22T18:54:40,327 DEBUG [M:0;d79ba0c344fb:42005 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d221d2f13d894becb184e1ee63071aca as hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d221d2f13d894becb184e1ee63071aca 2024-11-22T18:54:40,331 INFO [M:0;d79ba0c344fb:42005 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d221d2f13d894becb184e1ee63071aca, entries=8, sequenceid=125, filesize=5.5 K 2024-11-22T18:54:40,332 DEBUG [M:0;d79ba0c344fb:42005 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/64b726269e03410a99d394c05fdbd4c0 as hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/64b726269e03410a99d394c05fdbd4c0 2024-11-22T18:54:40,336 INFO [M:0;d79ba0c344fb:42005 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 64b726269e03410a99d394c05fdbd4c0 2024-11-22T18:54:40,336 INFO [M:0;d79ba0c344fb:42005 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/64b726269e03410a99d394c05fdbd4c0, entries=13, sequenceid=125, filesize=6.9 K 2024-11-22T18:54:40,337 DEBUG [M:0;d79ba0c344fb:42005 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c9cb67cd7eda4e43ad0ff7854aa4ac6d as hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c9cb67cd7eda4e43ad0ff7854aa4ac6d 2024-11-22T18:54:40,341 INFO [M:0;d79ba0c344fb:42005 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c9cb67cd7eda4e43ad0ff7854aa4ac6d, entries=1, sequenceid=125, filesize=5.0 K 2024-11-22T18:54:40,342 DEBUG [M:0;d79ba0c344fb:42005 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4433df1bd71743439e44e13ea69e99e0 as hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4433df1bd71743439e44e13ea69e99e0 2024-11-22T18:54:40,346 INFO [M:0;d79ba0c344fb:42005 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:39409/user/jenkins/test-data/80538d40-0ee1-a9b8-02eb-10b098add993/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4433df1bd71743439e44e13ea69e99e0, entries=1, sequenceid=125, filesize=4.9 K 2024-11-22T18:54:40,347 INFO [M:0;d79ba0c344fb:42005 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.29 KB/64808, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 124ms, sequenceid=125, compaction requested=false 2024-11-22T18:54:40,348 INFO [M:0;d79ba0c344fb:42005 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T18:54:40,349 DEBUG [M:0;d79ba0c344fb:42005 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732301680222Disabling compacts and flushes for region at 1732301680222Disabling writes for close at 1732301680223 (+1 ms)Obtaining lock to block concurrent updates at 1732301680223Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732301680223Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52651, getHeapSize=64808, getOffHeapSize=0, getCellsCount=148 at 1732301680223Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732301680224 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732301680224Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732301680240 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732301680240Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732301680252 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732301680268 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732301680268Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732301680278 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732301680292 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732301680292Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732301680301 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732301680317 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732301680317Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@ce06d4b: reopening flushed file at 1732301680326 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@705e56d6: reopening flushed file at 1732301680331 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@579a1a11: reopening flushed file at 1732301680336 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@193b8aed: reopening flushed file at 1732301680341 (+5 ms)Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.29 KB/64808, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 124ms, sequenceid=125, compaction requested=false at 1732301680347 (+6 ms)Writing region close event to WAL at 1732301680348 (+1 ms)Closed at 1732301680348 2024-11-22T18:54:40,349 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:54:40,349 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:54:40,349 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:54:40,349 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 
2024-11-22T18:54:40,349 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:54:40,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38581 is added to blk_1073741830_1006 (size=61320) 2024-11-22T18:54:40,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33065 is added to blk_1073741830_1006 (size=61320) 2024-11-22T18:54:40,352 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-22T18:54:40,352 INFO [M:0;d79ba0c344fb:42005 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-22T18:54:40,352 INFO [M:0;d79ba0c344fb:42005 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42005 2024-11-22T18:54:40,352 INFO [M:0;d79ba0c344fb:42005 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T18:54:40,450 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T18:54:40,450 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T18:54:40,454 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42005-0x1014106cde60000, quorum=127.0.0.1:55954, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T18:54:40,454 INFO [M:0;d79ba0c344fb:42005 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T18:54:40,454 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42005-0x1014106cde60000, quorum=127.0.0.1:55954, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T18:54:40,457 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@f63b03b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T18:54:40,458 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7f1304aa{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T18:54:40,458 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T18:54:40,458 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@15c8c411{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T18:54:40,458 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@277e18bc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/43cfde25-2a66-b21f-b86f-ce7e62e43700/hadoop.log.dir/,STOPPED} 2024-11-22T18:54:40,459 WARN [BP-736412378-172.17.0.2-1732301610992 heartbeating to localhost/127.0.0.1:39409 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T18:54:40,459 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T18:54:40,459 WARN [BP-736412378-172.17.0.2-1732301610992 heartbeating to localhost/127.0.0.1:39409 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-736412378-172.17.0.2-1732301610992 (Datanode Uuid 54919de8-122d-4a60-9ad5-271a06ea772a) service to localhost/127.0.0.1:39409 2024-11-22T18:54:40,460 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T18:54:40,460 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/43cfde25-2a66-b21f-b86f-ce7e62e43700/cluster_e4920b2c-f78f-34f5-7ef0-b819df0e3aef/data/data3/current/BP-736412378-172.17.0.2-1732301610992 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T18:54:40,460 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/43cfde25-2a66-b21f-b86f-ce7e62e43700/cluster_e4920b2c-f78f-34f5-7ef0-b819df0e3aef/data/data4/current/BP-736412378-172.17.0.2-1732301610992 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T18:54:40,461 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T18:54:40,473 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2124b505{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T18:54:40,473 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@285e4e67{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T18:54:40,473 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T18:54:40,474 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6bb90d86{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T18:54:40,474 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47b62c1f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/43cfde25-2a66-b21f-b86f-ce7e62e43700/hadoop.log.dir/,STOPPED} 2024-11-22T18:54:40,475 WARN [BP-736412378-172.17.0.2-1732301610992 heartbeating to localhost/127.0.0.1:39409 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T18:54:40,475 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T18:54:40,475 WARN [BP-736412378-172.17.0.2-1732301610992 heartbeating to localhost/127.0.0.1:39409 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-736412378-172.17.0.2-1732301610992 (Datanode Uuid da17622b-a6b7-407a-bf5b-666b80470297) service to localhost/127.0.0.1:39409 2024-11-22T18:54:40,475 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T18:54:40,476 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/43cfde25-2a66-b21f-b86f-ce7e62e43700/cluster_e4920b2c-f78f-34f5-7ef0-b819df0e3aef/data/data1/current/BP-736412378-172.17.0.2-1732301610992 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T18:54:40,476 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/43cfde25-2a66-b21f-b86f-ce7e62e43700/cluster_e4920b2c-f78f-34f5-7ef0-b819df0e3aef/data/data2/current/BP-736412378-172.17.0.2-1732301610992 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T18:54:40,476 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T18:54:40,484 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@38dc0fd7{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T18:54:40,484 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43bec0c2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T18:54:40,484 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T18:54:40,485 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2a933e33{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T18:54:40,485 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@75ee0a96{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/43cfde25-2a66-b21f-b86f-ce7e62e43700/hadoop.log.dir/,STOPPED} 2024-11-22T18:54:40,493 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-22T18:54:40,523 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-22T18:54:40,534 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=227 (was 206) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39409 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:39409 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39409 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39409 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:39409 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39409 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39409 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39409 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=503 (was 481) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=149 (was 99) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=7081 (was 7578) 2024-11-22T18:54:40,543 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=227, OpenFileDescriptor=503, MaxFileDescriptor=1048576, SystemLoadAverage=149, ProcessCount=11, AvailableMemoryMB=7081 2024-11-22T18:54:40,543 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-22T18:54:40,544 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/43cfde25-2a66-b21f-b86f-ce7e62e43700/hadoop.log.dir so I do NOT create it in target/test-data/d45c06bc-d634-55a5-8db6-8b5158c5f157 2024-11-22T18:54:40,544 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/43cfde25-2a66-b21f-b86f-ce7e62e43700/hadoop.tmp.dir so I do NOT create it in target/test-data/d45c06bc-d634-55a5-8db6-8b5158c5f157 2024-11-22T18:54:40,544 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45c06bc-d634-55a5-8db6-8b5158c5f157/cluster_f3233d8e-dcc9-5c97-be54-7de882edd310, deleteOnExit=true 2024-11-22T18:54:40,544 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-22T18:54:40,544 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45c06bc-d634-55a5-8db6-8b5158c5f157/test.cache.data in system properties and HBase conf 2024-11-22T18:54:40,544 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45c06bc-d634-55a5-8db6-8b5158c5f157/hadoop.tmp.dir in system properties and HBase conf 2024-11-22T18:54:40,544 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45c06bc-d634-55a5-8db6-8b5158c5f157/hadoop.log.dir in system properties and HBase conf 2024-11-22T18:54:40,544 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45c06bc-d634-55a5-8db6-8b5158c5f157/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-22T18:54:40,544 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45c06bc-d634-55a5-8db6-8b5158c5f157/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-22T18:54:40,544 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-22T18:54:40,544 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-22T18:54:40,545 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45c06bc-d634-55a5-8db6-8b5158c5f157/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-22T18:54:40,545 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45c06bc-d634-55a5-8db6-8b5158c5f157/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-22T18:54:40,545 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45c06bc-d634-55a5-8db6-8b5158c5f157/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-22T18:54:40,545 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45c06bc-d634-55a5-8db6-8b5158c5f157/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T18:54:40,545 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45c06bc-d634-55a5-8db6-8b5158c5f157/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-22T18:54:40,545 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45c06bc-d634-55a5-8db6-8b5158c5f157/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-22T18:54:40,545 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45c06bc-d634-55a5-8db6-8b5158c5f157/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T18:54:40,545 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45c06bc-d634-55a5-8db6-8b5158c5f157/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T18:54:40,545 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45c06bc-d634-55a5-8db6-8b5158c5f157/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-22T18:54:40,545 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45c06bc-d634-55a5-8db6-8b5158c5f157/nfs.dump.dir in system properties and HBase conf 2024-11-22T18:54:40,545 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45c06bc-d634-55a5-8db6-8b5158c5f157/java.io.tmpdir in system properties and HBase conf 2024-11-22T18:54:40,545 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45c06bc-d634-55a5-8db6-8b5158c5f157/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T18:54:40,545 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45c06bc-d634-55a5-8db6-8b5158c5f157/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-22T18:54:40,545 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45c06bc-d634-55a5-8db6-8b5158c5f157/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-22T18:54:40,559 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T18:54:40,619 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T18:54:40,622 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T18:54:40,624 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T18:54:40,624 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T18:54:40,624 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T18:54:40,624 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T18:54:40,625 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@fee3652{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45c06bc-d634-55a5-8db6-8b5158c5f157/hadoop.log.dir/,AVAILABLE} 2024-11-22T18:54:40,625 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@412c5a2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T18:54:40,742 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4bedee20{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45c06bc-d634-55a5-8db6-8b5158c5f157/java.io.tmpdir/jetty-localhost-46067-hadoop-hdfs-3_4_1-tests_jar-_-any-14818506460997284630/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T18:54:40,743 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@27862096{HTTP/1.1, (http/1.1)}{localhost:46067} 2024-11-22T18:54:40,743 INFO [Time-limited test {}] server.Server(415): Started @306132ms 2024-11-22T18:54:40,757 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T18:54:40,814 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T18:54:40,816 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T18:54:40,817 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T18:54:40,817 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T18:54:40,817 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T18:54:40,818 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5be97557{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45c06bc-d634-55a5-8db6-8b5158c5f157/hadoop.log.dir/,AVAILABLE} 2024-11-22T18:54:40,818 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@10c1adfc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T18:54:40,939 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@150dab73{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45c06bc-d634-55a5-8db6-8b5158c5f157/java.io.tmpdir/jetty-localhost-40477-hadoop-hdfs-3_4_1-tests_jar-_-any-7070015165637547339/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T18:54:40,939 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6486a7e6{HTTP/1.1, (http/1.1)}{localhost:40477} 2024-11-22T18:54:40,939 INFO [Time-limited test {}] server.Server(415): Started @306329ms 2024-11-22T18:54:40,941 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T18:54:40,974 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T18:54:40,981 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T18:54:40,982 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T18:54:40,982 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T18:54:40,982 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T18:54:40,982 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2b825638{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45c06bc-d634-55a5-8db6-8b5158c5f157/hadoop.log.dir/,AVAILABLE} 2024-11-22T18:54:40,983 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1926aa54{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T18:54:41,058 WARN [Thread-2475 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45c06bc-d634-55a5-8db6-8b5158c5f157/cluster_f3233d8e-dcc9-5c97-be54-7de882edd310/data/data1/current/BP-1434225187-172.17.0.2-1732301680564/current, will proceed with Du for space computation calculation, 2024-11-22T18:54:41,059 WARN [Thread-2476 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45c06bc-d634-55a5-8db6-8b5158c5f157/cluster_f3233d8e-dcc9-5c97-be54-7de882edd310/data/data2/current/BP-1434225187-172.17.0.2-1732301680564/current, will proceed with Du for space computation calculation, 2024-11-22T18:54:41,085 WARN [Thread-2454 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T18:54:41,087 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x222a3b2472af2051 with lease ID 0xa6c7153cf1a83122: Processing first storage report for DS-a4302202-df8b-41a4-a011-0e1b25024758 from datanode DatanodeRegistration(127.0.0.1:42147, datanodeUuid=c255f87f-5ee4-4f1a-9c1a-78503d585de7, infoPort=32937, infoSecurePort=0, ipcPort=40057, storageInfo=lv=-57;cid=testClusterID;nsid=478081169;c=1732301680564) 2024-11-22T18:54:41,087 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x222a3b2472af2051 with lease ID 0xa6c7153cf1a83122: from storage DS-a4302202-df8b-41a4-a011-0e1b25024758 node DatanodeRegistration(127.0.0.1:42147, datanodeUuid=c255f87f-5ee4-4f1a-9c1a-78503d585de7, infoPort=32937, infoSecurePort=0, ipcPort=40057, storageInfo=lv=-57;cid=testClusterID;nsid=478081169;c=1732301680564), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T18:54:41,087 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x222a3b2472af2051 with lease ID 0xa6c7153cf1a83122: Processing first storage report for DS-b738df90-b3bb-45f9-b2a7-a71570196a74 from datanode DatanodeRegistration(127.0.0.1:42147, datanodeUuid=c255f87f-5ee4-4f1a-9c1a-78503d585de7, infoPort=32937, infoSecurePort=0, ipcPort=40057, storageInfo=lv=-57;cid=testClusterID;nsid=478081169;c=1732301680564) 2024-11-22T18:54:41,087 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x222a3b2472af2051 with lease ID 0xa6c7153cf1a83122: from storage DS-b738df90-b3bb-45f9-b2a7-a71570196a74 node DatanodeRegistration(127.0.0.1:42147, datanodeUuid=c255f87f-5ee4-4f1a-9c1a-78503d585de7, infoPort=32937, infoSecurePort=0, ipcPort=40057, storageInfo=lv=-57;cid=testClusterID;nsid=478081169;c=1732301680564), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T18:54:41,114 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@380ffe40{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45c06bc-d634-55a5-8db6-8b5158c5f157/java.io.tmpdir/jetty-localhost-34127-hadoop-hdfs-3_4_1-tests_jar-_-any-14258690370589460954/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T18:54:41,114 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4e35321a{HTTP/1.1, (http/1.1)}{localhost:34127} 2024-11-22T18:54:41,114 INFO [Time-limited test {}] server.Server(415): Started @306503ms 2024-11-22T18:54:41,115 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
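
Note: the entries above trace the mini HDFS cluster coming up (NameNode and two DataNode Jetty servers, first block reports) for the `StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1, ...}` request logged earlier. As a point of reference, here is a minimal, hedged sketch of how a test typically drives that startup. The class and option names (`HBaseTestingUtil`, `StartMiniClusterOption`) are taken from this log, but the exact builder/method signatures are assumptions and may differ between HBase versions; the class name `MiniClusterStartupSketch` is purely illustrative.

```java
// Illustrative sketch only -- not the actual TestLogRolling setup.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterStartupSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    // Mirrors the option dump in the log: 1 master, 1 region server,
    // 2 datanodes, 1 ZooKeeper server.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();
    util.startMiniCluster(option);   // starts ZK, mini-DFS and HBase, as traced above
    try {
      // ... exercise the cluster (e.g. force WAL rolls, as TestLogRolling does) ...
    } finally {
      util.shutdownMiniCluster();    // tears the cluster down; test dirs are deleteOnExit
    }
  }
}
```
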
2024-11-22T18:54:41,220 WARN [Thread-2501 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45c06bc-d634-55a5-8db6-8b5158c5f157/cluster_f3233d8e-dcc9-5c97-be54-7de882edd310/data/data3/current/BP-1434225187-172.17.0.2-1732301680564/current, will proceed with Du for space computation calculation, 2024-11-22T18:54:41,220 WARN [Thread-2502 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45c06bc-d634-55a5-8db6-8b5158c5f157/cluster_f3233d8e-dcc9-5c97-be54-7de882edd310/data/data4/current/BP-1434225187-172.17.0.2-1732301680564/current, will proceed with Du for space computation calculation, 2024-11-22T18:54:41,242 WARN [Thread-2490 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-22T18:54:41,244 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x51e49a6decd69b7a with lease ID 0xa6c7153cf1a83123: Processing first storage report for DS-bcee828c-a1c7-4955-9fb6-2091c588860a from datanode DatanodeRegistration(127.0.0.1:46719, datanodeUuid=e6f51d81-1bd8-4489-8468-37e778d978d3, infoPort=45613, infoSecurePort=0, ipcPort=38645, storageInfo=lv=-57;cid=testClusterID;nsid=478081169;c=1732301680564) 2024-11-22T18:54:41,244 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x51e49a6decd69b7a with lease ID 0xa6c7153cf1a83123: from storage DS-bcee828c-a1c7-4955-9fb6-2091c588860a node DatanodeRegistration(127.0.0.1:46719, datanodeUuid=e6f51d81-1bd8-4489-8468-37e778d978d3, infoPort=45613, infoSecurePort=0, ipcPort=38645, storageInfo=lv=-57;cid=testClusterID;nsid=478081169;c=1732301680564), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T18:54:41,244 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x51e49a6decd69b7a with lease ID 0xa6c7153cf1a83123: Processing first storage report for DS-b62df1df-f92f-40bc-9c23-b127d288ea13 from datanode DatanodeRegistration(127.0.0.1:46719, datanodeUuid=e6f51d81-1bd8-4489-8468-37e778d978d3, infoPort=45613, infoSecurePort=0, ipcPort=38645, storageInfo=lv=-57;cid=testClusterID;nsid=478081169;c=1732301680564) 2024-11-22T18:54:41,244 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x51e49a6decd69b7a with lease ID 0xa6c7153cf1a83123: from storage DS-b62df1df-f92f-40bc-9c23-b127d288ea13 node DatanodeRegistration(127.0.0.1:46719, datanodeUuid=e6f51d81-1bd8-4489-8468-37e778d978d3, infoPort=45613, infoSecurePort=0, ipcPort=38645, storageInfo=lv=-57;cid=testClusterID;nsid=478081169;c=1732301680564), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T18:54:41,340 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45c06bc-d634-55a5-8db6-8b5158c5f157 2024-11-22T18:54:41,342 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45c06bc-d634-55a5-8db6-8b5158c5f157/cluster_f3233d8e-dcc9-5c97-be54-7de882edd310/zookeeper_0, clientPort=53097, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45c06bc-d634-55a5-8db6-8b5158c5f157/cluster_f3233d8e-dcc9-5c97-be54-7de882edd310/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45c06bc-d634-55a5-8db6-8b5158c5f157/cluster_f3233d8e-dcc9-5c97-be54-7de882edd310/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-22T18:54:41,343 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=53097 2024-11-22T18:54:41,344 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T18:54:41,345 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T18:54:41,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42147 is added to blk_1073741825_1001 (size=7) 2024-11-22T18:54:41,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46719 is added to blk_1073741825_1001 (size=7) 2024-11-22T18:54:41,355 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a with version=8 2024-11-22T18:54:41,355 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:44137/user/jenkins/test-data/c1c6d2b9-0042-e58c-876f-2ad73d56cc6c/hbase-staging 2024-11-22T18:54:41,357 INFO [Time-limited test {}] client.ConnectionUtils(128): master/d79ba0c344fb:0 server-side Connection retries=45 2024-11-22T18:54:41,357 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T18:54:41,357 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T18:54:41,357 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T18:54:41,357 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T18:54:41,357 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T18:54:41,357 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-22T18:54:41,357 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T18:54:41,358 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40297 2024-11-22T18:54:41,359 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40297 connecting to ZooKeeper ensemble=127.0.0.1:53097 2024-11-22T18:54:41,367 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:402970x0, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T18:54:41,369 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40297-0x1014107dd4e0000 connected 2024-11-22T18:54:41,407 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T18:54:41,409 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T18:54:41,411 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40297-0x1014107dd4e0000, quorum=127.0.0.1:53097, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T18:54:41,411 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a, hbase.cluster.distributed=false 2024-11-22T18:54:41,415 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40297-0x1014107dd4e0000, quorum=127.0.0.1:53097, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T18:54:41,416 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40297 2024-11-22T18:54:41,416 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40297 2024-11-22T18:54:41,417 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40297 2024-11-22T18:54:41,417 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40297 2024-11-22T18:54:41,417 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40297 2024-11-22T18:54:41,436 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/d79ba0c344fb:0 server-side Connection retries=45 2024-11-22T18:54:41,436 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T18:54:41,436 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T18:54:41,436 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T18:54:41,436 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T18:54:41,436 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T18:54:41,436 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-22T18:54:41,436 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T18:54:41,437 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36373 2024-11-22T18:54:41,438 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36373 connecting to ZooKeeper ensemble=127.0.0.1:53097 2024-11-22T18:54:41,439 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T18:54:41,441 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T18:54:41,447 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:363730x0, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T18:54:41,447 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:363730x0, quorum=127.0.0.1:53097, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T18:54:41,448 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-22T18:54:41,451 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:41,451 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T18:54:41,452 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36373-0x1014107dd4e0001 connected 2024-11-22T18:54:41,460 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-22T18:54:41,461 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36373-0x1014107dd4e0001, quorum=127.0.0.1:53097, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-22T18:54:41,462 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36373-0x1014107dd4e0001, quorum=127.0.0.1:53097, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T18:54:41,464 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36373 2024-11-22T18:54:41,468 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36373 2024-11-22T18:54:41,476 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36373 2024-11-22T18:54:41,477 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36373 2024-11-22T18:54:41,477 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36373 2024-11-22T18:54:41,489 DEBUG [M:0;d79ba0c344fb:40297 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;d79ba0c344fb:40297 2024-11-22T18:54:41,490 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/d79ba0c344fb,40297,1732301681357 2024-11-22T18:54:41,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40297-0x1014107dd4e0000, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T18:54:41,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36373-0x1014107dd4e0001, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T18:54:41,494 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40297-0x1014107dd4e0000, quorum=127.0.0.1:53097, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/d79ba0c344fb,40297,1732301681357 2024-11-22T18:54:41,498 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36373-0x1014107dd4e0001, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-22T18:54:41,498 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40297-0x1014107dd4e0000, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:54:41,498 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36373-0x1014107dd4e0001, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:54:41,498 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): 
master:40297-0x1014107dd4e0000, quorum=127.0.0.1:53097, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-22T18:54:41,499 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/d79ba0c344fb,40297,1732301681357 from backup master directory 2024-11-22T18:54:41,501 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40297-0x1014107dd4e0000, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/d79ba0c344fb,40297,1732301681357 2024-11-22T18:54:41,501 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36373-0x1014107dd4e0001, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T18:54:41,501 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40297-0x1014107dd4e0000, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T18:54:41,501 WARN [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-22T18:54:41,501 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=d79ba0c344fb,40297,1732301681357 2024-11-22T18:54:41,505 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/hbase.id] with ID: 2c217b7f-2b07-4a6c-a44c-8ea4ecdebd89 2024-11-22T18:54:41,505 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/.tmp/hbase.id 2024-11-22T18:54:41,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46719 is added to blk_1073741826_1002 (size=42) 2024-11-22T18:54:41,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42147 is added to blk_1073741826_1002 (size=42) 2024-11-22T18:54:41,513 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/.tmp/hbase.id]:[hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/hbase.id] 2024-11-22T18:54:41,526 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T18:54:41,526 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-22T18:54:41,528 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
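
Note: the `FSUtils(625)`/`FSUtils(634)` entries above show the cluster ID being written to a `.tmp` path and then moved to its final `hbase.id` location, i.e. the standard "write to a temporary file, then rename into place" pattern for publishing a file atomically on HDFS. Below is a minimal sketch of that pattern using the plain Hadoop `FileSystem` API. The paths are placeholders (not the test-data paths from the log); the ID string is the one logged above; this is not the internal `FSUtils` code itself.

```java
// Illustrative sketch of the write-temp-then-rename pattern seen in the log.
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PublishClusterIdSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    Path tmp = new Path("/hbase/.tmp/hbase.id");   // placeholder temporary location
    Path dst = new Path("/hbase/hbase.id");        // placeholder final location

    // 1. Write the content to the temporary path first.
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("2c217b7f-2b07-4a6c-a44c-8ea4ecdebd89".getBytes(StandardCharsets.UTF_8));
    }
    // 2. Rename into place so readers never observe a half-written file.
    if (!fs.rename(tmp, dst)) {
      throw new IOException("rename " + tmp + " -> " + dst + " failed");
    }
  }
}
```

The rename is what makes the publish safe: any reader of the final path sees either no file or the complete file, never a partially written one.
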
2024-11-22T18:54:41,532 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40297-0x1014107dd4e0000, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:54:41,532 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36373-0x1014107dd4e0001, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:54:41,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46719 is added to blk_1073741827_1003 (size=196) 2024-11-22T18:54:41,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42147 is added to blk_1073741827_1003 (size=196) 2024-11-22T18:54:41,539 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T18:54:41,540 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-22T18:54:41,540 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T18:54:41,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46719 is added to blk_1073741828_1004 (size=1189) 2024-11-22T18:54:41,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42147 is added to blk_1073741828_1004 (size=1189) 2024-11-22T18:54:41,548 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/MasterData/data/master/store 2024-11-22T18:54:41,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42147 is added to blk_1073741829_1005 (size=34) 2024-11-22T18:54:41,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46719 is added to blk_1073741829_1005 (size=34) 2024-11-22T18:54:41,555 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T18:54:41,555 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T18:54:41,555 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T18:54:41,555 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T18:54:41,555 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T18:54:41,555 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T18:54:41,555 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
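
Note: the `master:store` descriptor dumped above (families `info`, `proc`, `rs`, `state`) can be expressed with HBase's public descriptor builders. The sketch below is hedged and illustrative: it covers only the `info` and `proc` families, copies the option values shown in the log (VERSIONS, IN_MEMORY, BLOCKSIZE, encoding, bloom type), and is not how `MasterRegion` constructs the table internally; the class name `MasterStoreDescriptorSketch` is invented for the example.

```java
// Illustrative reconstruction of part of the 'master:store' descriptor above.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static void main(String[] args) {
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                    // VERSIONS => '3'
            .setInMemory(true)                                    // IN_MEMORY => 'true'
            .setBlocksize(8 * 1024)                               // BLOCKSIZE => 8 KB
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // as in the log
            .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
            .build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)                                    // VERSIONS => '1'
            .setBlocksize(64 * 1024)                              // BLOCKSIZE => 64 KB
            .setBloomFilterType(BloomType.ROW)                    // BLOOMFILTER => 'ROW'
            .build())
        .build();
    System.out.println(desc);  // prints a descriptor string similar to the log dump
  }
}
```
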
2024-11-22T18:54:41,555 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732301681555Disabling compacts and flushes for region at 1732301681555Disabling writes for close at 1732301681555Writing region close event to WAL at 1732301681555Closed at 1732301681555 2024-11-22T18:54:41,556 WARN [master/d79ba0c344fb:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/MasterData/data/master/store/.initializing 2024-11-22T18:54:41,556 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/MasterData/WALs/d79ba0c344fb,40297,1732301681357 2024-11-22T18:54:41,559 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d79ba0c344fb%2C40297%2C1732301681357, suffix=, logDir=hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/MasterData/WALs/d79ba0c344fb,40297,1732301681357, archiveDir=hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/MasterData/oldWALs, maxLogs=10 2024-11-22T18:54:41,559 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor d79ba0c344fb%2C40297%2C1732301681357.1732301681559 2024-11-22T18:54:41,585 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/MasterData/WALs/d79ba0c344fb,40297,1732301681357/d79ba0c344fb%2C40297%2C1732301681357.1732301681559 2024-11-22T18:54:41,586 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32937:32937),(127.0.0.1/127.0.0.1:45613:45613)] 2024-11-22T18:54:41,587 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-22T18:54:41,587 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T18:54:41,588 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:54:41,588 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:54:41,589 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:54:41,591 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-22T18:54:41,591 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:54:41,592 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:54:41,592 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:54:41,593 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-22T18:54:41,593 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:54:41,594 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T18:54:41,594 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:54:41,595 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-22T18:54:41,595 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:54:41,595 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T18:54:41,595 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:54:41,596 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-22T18:54:41,597 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:54:41,597 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T18:54:41,597 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:54:41,598 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:54:41,598 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:54:41,599 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:54:41,599 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:54:41,600 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-22T18:54:41,601 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T18:54:41,603 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T18:54:41,603 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=774059, jitterRate=-0.015733465552330017}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-22T18:54:41,604 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732301681588Initializing all the Stores at 1732301681589 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732301681589Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732301681589Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732301681589Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732301681589Cleaning up temporary data from old regions at 1732301681599 (+10 ms)Region opened successfully at 1732301681604 (+5 ms) 2024-11-22T18:54:41,604 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-22T18:54:41,607 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@46b43d0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d79ba0c344fb/172.17.0.2:0 2024-11-22T18:54:41,608 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-22T18:54:41,608 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-22T18:54:41,608 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-22T18:54:41,608 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-22T18:54:41,609 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-22T18:54:41,609 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-22T18:54:41,609 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-22T18:54:41,614 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-22T18:54:41,615 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40297-0x1014107dd4e0000, quorum=127.0.0.1:53097, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-22T18:54:41,616 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-22T18:54:41,617 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-22T18:54:41,617 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40297-0x1014107dd4e0000, quorum=127.0.0.1:53097, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-22T18:54:41,619 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-22T18:54:41,619 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-22T18:54:41,620 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40297-0x1014107dd4e0000, quorum=127.0.0.1:53097, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-22T18:54:41,621 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-22T18:54:41,622 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40297-0x1014107dd4e0000, quorum=127.0.0.1:53097, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-22T18:54:41,624 DEBUG 
[master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-22T18:54:41,626 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40297-0x1014107dd4e0000, quorum=127.0.0.1:53097, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-22T18:54:41,627 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-22T18:54:41,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40297-0x1014107dd4e0000, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T18:54:41,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40297-0x1014107dd4e0000, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:54:41,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36373-0x1014107dd4e0001, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T18:54:41,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36373-0x1014107dd4e0001, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:54:41,630 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=d79ba0c344fb,40297,1732301681357, sessionid=0x1014107dd4e0000, setting cluster-up flag (Was=false) 2024-11-22T18:54:41,634 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40297-0x1014107dd4e0000, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:54:41,634 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36373-0x1014107dd4e0001, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:54:41,639 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-22T18:54:41,640 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=d79ba0c344fb,40297,1732301681357 2024-11-22T18:54:41,644 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40297-0x1014107dd4e0000, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:54:41,644 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36373-0x1014107dd4e0001, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:54:41,650 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-22T18:54:41,652 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=d79ba0c344fb,40297,1732301681357 2024-11-22T18:54:41,653 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-22T18:54:41,654 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-22T18:54:41,655 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-22T18:54:41,655 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-22T18:54:41,655 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: d79ba0c344fb,40297,1732301681357 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-22T18:54:41,656 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/d79ba0c344fb:0, corePoolSize=5, maxPoolSize=5 2024-11-22T18:54:41,656 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/d79ba0c344fb:0, corePoolSize=5, maxPoolSize=5 2024-11-22T18:54:41,656 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/d79ba0c344fb:0, corePoolSize=5, maxPoolSize=5 2024-11-22T18:54:41,656 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/d79ba0c344fb:0, corePoolSize=5, maxPoolSize=5 2024-11-22T18:54:41,656 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/d79ba0c344fb:0, corePoolSize=10, maxPoolSize=10 2024-11-22T18:54:41,656 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:54:41,656 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/d79ba0c344fb:0, corePoolSize=2, maxPoolSize=2 2024-11-22T18:54:41,656 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/d79ba0c344fb:0, corePoolSize=1, 
maxPoolSize=1 2024-11-22T18:54:41,657 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732301711657 2024-11-22T18:54:41,658 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-22T18:54:41,658 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-22T18:54:41,658 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-22T18:54:41,658 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-22T18:54:41,658 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-22T18:54:41,658 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-22T18:54:41,658 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T18:54:41,658 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T18:54:41,658 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-22T18:54:41,658 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-22T18:54:41,659 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-22T18:54:41,659 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-22T18:54:41,659 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-22T18:54:41,659 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-22T18:54:41,659 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/d79ba0c344fb:0:becomeActiveMaster-HFileCleaner.large.0-1732301681659,5,FailOnTimeoutGroup] 2024-11-22T18:54:41,659 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:54:41,659 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/d79ba0c344fb:0:becomeActiveMaster-HFileCleaner.small.0-1732301681659,5,FailOnTimeoutGroup] 2024-11-22T18:54:41,659 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
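The FlushLargeStoresPolicy line above falls back to region.getMemStoreFlushHeapSize divided by the number of column families because no hbase.hregion.percolumnfamilyflush.size.lower.bound is set for master:store. A minimal sketch of that arithmetic, assuming the 134217728-byte flush size logged by MasterRegionFlusherAndCompactor and the four families (info, proc, rs, state) shown above; the class and method names are illustrative, not HBase API:

    // Per-family flush lower-bound fallback, using only values taken from the log above.
    public class FlushLowerBoundSketch {
        static long fallbackLowerBound(long memstoreFlushSizeBytes, int familyCount) {
            // "using region.getMemStoreFlushHeapSize/# of families ... instead"
            return memstoreFlushSizeBytes / familyCount;
        }

        public static void main(String[] args) {
            long flushSize = 134_217_728L; // 128 MB, as logged for the master region
            int families = 4;              // info, proc, rs, state
            // Prints 33554432 (32.0 M), matching FlushLargeStoresPolicy{flushSizeLowerBound=33554432}.
            System.out.println(fallbackLowerBound(flushSize, families));
        }
    }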
2024-11-22T18:54:41,659 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-22T18:54:41,659 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-22T18:54:41,659 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-22T18:54:41,659 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-22T18:54:41,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42147 is added to blk_1073741831_1007 (size=1321) 2024-11-22T18:54:41,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46719 is added to blk_1073741831_1007 (size=1321) 2024-11-22T18:54:41,679 INFO [RS:0;d79ba0c344fb:36373 {}] regionserver.HRegionServer(746): ClusterId : 2c217b7f-2b07-4a6c-a44c-8ea4ecdebd89 2024-11-22T18:54:41,679 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-22T18:54:41,679 DEBUG [RS:0;d79ba0c344fb:36373 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-22T18:54:41,679 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME 
=> 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a 2024-11-22T18:54:41,682 DEBUG [RS:0;d79ba0c344fb:36373 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-22T18:54:41,682 DEBUG [RS:0;d79ba0c344fb:36373 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-22T18:54:41,684 DEBUG [RS:0;d79ba0c344fb:36373 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-22T18:54:41,685 DEBUG [RS:0;d79ba0c344fb:36373 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6daee524, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d79ba0c344fb/172.17.0.2:0 2024-11-22T18:54:41,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42147 is added to blk_1073741832_1008 (size=32) 2024-11-22T18:54:41,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46719 is added to blk_1073741832_1008 (size=32) 2024-11-22T18:54:41,690 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T18:54:41,692 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T18:54:41,694 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T18:54:41,694 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:54:41,694 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:54:41,694 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T18:54:41,696 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T18:54:41,696 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:54:41,696 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:54:41,696 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T18:54:41,697 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T18:54:41,697 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:54:41,698 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:54:41,698 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T18:54:41,699 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T18:54:41,699 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:54:41,699 DEBUG [RS:0;d79ba0c344fb:36373 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;d79ba0c344fb:36373 2024-11-22T18:54:41,699 INFO [RS:0;d79ba0c344fb:36373 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-22T18:54:41,699 INFO [RS:0;d79ba0c344fb:36373 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-22T18:54:41,699 DEBUG [RS:0;d79ba0c344fb:36373 {}] regionserver.HRegionServer(832): About to register with Master. 
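The CompactionConfiguration lines repeated above carry the knobs for ratio-based minor-compaction selection: minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, off-peak ratio 5.0, throttle point 2684354560. The sketch below shows the shape of the usual ratio test such a policy applies (a file only joins a candidate set if it is no larger than ratio times the combined size of the other files); it is a simplified illustration under that assumption, not the ExploringCompactionPolicy implementation itself:

    import java.util.List;

    // Simplified "files in ratio" check using the ratios logged above (1.2 peak, 5.0 off-peak).
    public class CompactionRatioSketch {
        static boolean filesInRatio(List<Long> sizes, double ratio) {
            long total = sizes.stream().mapToLong(Long::longValue).sum();
            for (long size : sizes) {
                if (size > (total - size) * ratio) {
                    return false; // one file dwarfs the rest; skip this candidate set
                }
            }
            return true;
        }

        public static void main(String[] args) {
            List<Long> even   = List.of(10L << 20, 12L << 20, 11L << 20);  // three ~10 MB files
            List<Long> skewed = List.of(10L << 20, 12L << 20, 500L << 20); // one 500 MB outlier
            System.out.println(filesInRatio(even, 1.2));   // true  -> acceptable at the peak ratio
            System.out.println(filesInRatio(skewed, 1.2)); // false -> rejected at ratio 1.2
            System.out.println(filesInRatio(skewed, 5.0)); // false -> still too skewed even off-peak
        }
    }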
2024-11-22T18:54:41,699 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:54:41,699 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T18:54:41,700 INFO [RS:0;d79ba0c344fb:36373 {}] regionserver.HRegionServer(2659): reportForDuty to master=d79ba0c344fb,40297,1732301681357 with port=36373, startcode=1732301681435 2024-11-22T18:54:41,700 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/data/hbase/meta/1588230740 2024-11-22T18:54:41,700 DEBUG [RS:0;d79ba0c344fb:36373 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-22T18:54:41,700 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/data/hbase/meta/1588230740 2024-11-22T18:54:41,702 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T18:54:41,702 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T18:54:41,702 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T18:54:41,703 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T18:54:41,705 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T18:54:41,706 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=765604, jitterRate=-0.026484951376914978}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T18:54:41,706 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53691, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-22T18:54:41,706 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732301681690Initializing all the Stores at 1732301681691 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732301681692 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732301681692Instantiating store for column 
family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732301681692Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732301681692Cleaning up temporary data from old regions at 1732301681702 (+10 ms)Region opened successfully at 1732301681706 (+4 ms) 2024-11-22T18:54:41,706 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40297 {}] master.ServerManager(363): Checking decommissioned status of RegionServer d79ba0c344fb,36373,1732301681435 2024-11-22T18:54:41,707 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T18:54:41,707 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T18:54:41,707 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40297 {}] master.ServerManager(517): Registering regionserver=d79ba0c344fb,36373,1732301681435 2024-11-22T18:54:41,707 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T18:54:41,707 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T18:54:41,707 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T18:54:41,707 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T18:54:41,707 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732301681706Disabling compacts and flushes for region at 1732301681706Disabling writes for close at 1732301681707 (+1 ms)Writing region close event to WAL at 1732301681707Closed at 1732301681707 2024-11-22T18:54:41,708 DEBUG [RS:0;d79ba0c344fb:36373 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a 2024-11-22T18:54:41,708 DEBUG [RS:0;d79ba0c344fb:36373 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37507 2024-11-22T18:54:41,708 DEBUG [RS:0;d79ba0c344fb:36373 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-22T18:54:41,708 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T18:54:41,708 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-22T18:54:41,708 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-22T18:54:41,710 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T18:54:41,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40297-0x1014107dd4e0000, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T18:54:41,710 DEBUG [RS:0;d79ba0c344fb:36373 {}] zookeeper.ZKUtil(111): regionserver:36373-0x1014107dd4e0001, quorum=127.0.0.1:53097, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/d79ba0c344fb,36373,1732301681435 2024-11-22T18:54:41,710 WARN [RS:0;d79ba0c344fb:36373 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-22T18:54:41,710 INFO [RS:0;d79ba0c344fb:36373 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T18:54:41,711 DEBUG [RS:0;d79ba0c344fb:36373 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/WALs/d79ba0c344fb,36373,1732301681435 2024-11-22T18:54:41,711 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [d79ba0c344fb,36373,1732301681435] 2024-11-22T18:54:41,712 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-22T18:54:41,714 INFO [RS:0;d79ba0c344fb:36373 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-22T18:54:41,716 INFO [RS:0;d79ba0c344fb:36373 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-22T18:54:41,717 INFO [RS:0;d79ba0c344fb:36373 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-22T18:54:41,717 INFO [RS:0;d79ba0c344fb:36373 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T18:54:41,717 INFO [RS:0;d79ba0c344fb:36373 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-22T18:54:41,718 INFO [RS:0;d79ba0c344fb:36373 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-22T18:54:41,718 INFO [RS:0;d79ba0c344fb:36373 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
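The MemStoreFlusher line above reports globalMemStoreLimit=880 M with globalMemStoreLimitLowMark=836 M (Offheap=false). The low mark is exactly 95% of the limit, so the two values are consistent with a 0.95 lower-limit factor; the sketch below only reproduces that arithmetic from the logged numbers, and the 0.95 factor is an inference, not a value printed in this log:

    // Relationship between the two MemStoreFlusher values logged above.
    public class MemStoreLimitSketch {
        public static void main(String[] args) {
            long globalLimitMb = 880; // globalMemStoreLimit=880 M, as logged
            long lowMarkMb = 836;     // globalMemStoreLimitLowMark=836 M, as logged
            System.out.println((double) lowMarkMb / globalLimitMb); // 0.95 -> implied lower-limit factor
            System.out.println(Math.round(globalLimitMb * 0.95));   // 836  -> forward check with that factor
        }
    }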
2024-11-22T18:54:41,718 DEBUG [RS:0;d79ba0c344fb:36373 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:54:41,718 DEBUG [RS:0;d79ba0c344fb:36373 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:54:41,718 DEBUG [RS:0;d79ba0c344fb:36373 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:54:41,718 DEBUG [RS:0;d79ba0c344fb:36373 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:54:41,718 DEBUG [RS:0;d79ba0c344fb:36373 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:54:41,718 DEBUG [RS:0;d79ba0c344fb:36373 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/d79ba0c344fb:0, corePoolSize=2, maxPoolSize=2 2024-11-22T18:54:41,718 DEBUG [RS:0;d79ba0c344fb:36373 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:54:41,718 DEBUG [RS:0;d79ba0c344fb:36373 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:54:41,718 DEBUG [RS:0;d79ba0c344fb:36373 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:54:41,718 DEBUG [RS:0;d79ba0c344fb:36373 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:54:41,718 DEBUG [RS:0;d79ba0c344fb:36373 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:54:41,718 DEBUG [RS:0;d79ba0c344fb:36373 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/d79ba0c344fb:0, corePoolSize=1, maxPoolSize=1 2024-11-22T18:54:41,718 DEBUG [RS:0;d79ba0c344fb:36373 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/d79ba0c344fb:0, corePoolSize=3, maxPoolSize=3 2024-11-22T18:54:41,718 DEBUG [RS:0;d79ba0c344fb:36373 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/d79ba0c344fb:0, corePoolSize=3, maxPoolSize=3 2024-11-22T18:54:41,719 INFO [RS:0;d79ba0c344fb:36373 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T18:54:41,719 INFO [RS:0;d79ba0c344fb:36373 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T18:54:41,719 INFO [RS:0;d79ba0c344fb:36373 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T18:54:41,719 INFO [RS:0;d79ba0c344fb:36373 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
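The ExecutorService lines above start one fixed-size pool per operation type on the regionserver: RS_OPEN_REGION, RS_OPEN_META and RS_CLOSE_REGION at corePoolSize=1/maxPoolSize=1, RS_LOG_REPLAY_OPS at 2/2, RS_SNAPSHOT_OPERATIONS and RS_FLUSH_OPERATIONS at 3/3, and so on. A generic java.util.concurrent sketch of that layout is below; the pool names and sizes come from the log, but the plumbing is plain JDK code for illustration, not HBase's executor.ExecutorService class:

    import java.util.LinkedHashMap;
    import java.util.Map;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    // One fixed thread pool per operation type, sized as in the log above.
    public class RegionServerPoolsSketch {
        public static void main(String[] args) {
            Map<String, Integer> poolSizes = new LinkedHashMap<>();
            poolSizes.put("RS_OPEN_REGION", 1);
            poolSizes.put("RS_OPEN_META", 1);
            poolSizes.put("RS_CLOSE_REGION", 1);
            poolSizes.put("RS_LOG_REPLAY_OPS", 2);
            poolSizes.put("RS_SNAPSHOT_OPERATIONS", 3);
            poolSizes.put("RS_FLUSH_OPERATIONS", 3);

            Map<String, ExecutorService> executors = new LinkedHashMap<>();
            poolSizes.forEach((name, size) -> executors.put(name, Executors.newFixedThreadPool(size)));

            // Dispatch a task to the pool for its operation type, then shut everything down.
            executors.get("RS_OPEN_REGION").submit(() -> System.out.println("open region task"));
            executors.values().forEach(ExecutorService::shutdown);
        }
    }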
2024-11-22T18:54:41,719 INFO [RS:0;d79ba0c344fb:36373 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-22T18:54:41,719 INFO [RS:0;d79ba0c344fb:36373 {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,36373,1732301681435-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T18:54:41,737 INFO [RS:0;d79ba0c344fb:36373 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-22T18:54:41,737 INFO [RS:0;d79ba0c344fb:36373 {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,36373,1732301681435-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T18:54:41,737 INFO [RS:0;d79ba0c344fb:36373 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T18:54:41,737 INFO [RS:0;d79ba0c344fb:36373 {}] regionserver.Replication(171): d79ba0c344fb,36373,1732301681435 started 2024-11-22T18:54:41,753 INFO [RS:0;d79ba0c344fb:36373 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T18:54:41,753 INFO [RS:0;d79ba0c344fb:36373 {}] regionserver.HRegionServer(1482): Serving as d79ba0c344fb,36373,1732301681435, RpcServer on d79ba0c344fb/172.17.0.2:36373, sessionid=0x1014107dd4e0001 2024-11-22T18:54:41,753 DEBUG [RS:0;d79ba0c344fb:36373 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-22T18:54:41,753 DEBUG [RS:0;d79ba0c344fb:36373 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager d79ba0c344fb,36373,1732301681435 2024-11-22T18:54:41,753 DEBUG [RS:0;d79ba0c344fb:36373 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd79ba0c344fb,36373,1732301681435' 2024-11-22T18:54:41,753 DEBUG [RS:0;d79ba0c344fb:36373 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-22T18:54:41,754 DEBUG [RS:0;d79ba0c344fb:36373 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-22T18:54:41,754 DEBUG [RS:0;d79ba0c344fb:36373 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-22T18:54:41,754 DEBUG [RS:0;d79ba0c344fb:36373 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-22T18:54:41,755 DEBUG [RS:0;d79ba0c344fb:36373 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager d79ba0c344fb,36373,1732301681435 2024-11-22T18:54:41,755 DEBUG [RS:0;d79ba0c344fb:36373 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd79ba0c344fb,36373,1732301681435' 2024-11-22T18:54:41,755 DEBUG [RS:0;d79ba0c344fb:36373 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-22T18:54:41,755 DEBUG [RS:0;d79ba0c344fb:36373 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-22T18:54:41,755 DEBUG [RS:0;d79ba0c344fb:36373 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-22T18:54:41,755 INFO [RS:0;d79ba0c344fb:36373 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-22T18:54:41,755 INFO [RS:0;d79ba0c344fb:36373 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-22T18:54:41,857 INFO [RS:0;d79ba0c344fb:36373 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d79ba0c344fb%2C36373%2C1732301681435, suffix=, logDir=hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/WALs/d79ba0c344fb,36373,1732301681435, archiveDir=hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/oldWALs, maxLogs=32 2024-11-22T18:54:41,858 INFO [RS:0;d79ba0c344fb:36373 {}] monitor.StreamSlowMonitor(122): New stream slow monitor d79ba0c344fb%2C36373%2C1732301681435.1732301681858 2024-11-22T18:54:41,862 WARN [d79ba0c344fb:40297 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-22T18:54:41,864 INFO [RS:0;d79ba0c344fb:36373 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/WALs/d79ba0c344fb,36373,1732301681435/d79ba0c344fb%2C36373%2C1732301681435.1732301681858 2024-11-22T18:54:41,865 DEBUG [RS:0;d79ba0c344fb:36373 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32937:32937),(127.0.0.1/127.0.0.1:45613:45613)] 2024-11-22T18:54:42,112 DEBUG [d79ba0c344fb:40297 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-22T18:54:42,113 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=d79ba0c344fb,36373,1732301681435 2024-11-22T18:54:42,114 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as d79ba0c344fb,36373,1732301681435, state=OPENING 2024-11-22T18:54:42,117 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-22T18:54:42,119 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40297-0x1014107dd4e0000, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:54:42,119 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36373-0x1014107dd4e0001, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:54:42,119 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T18:54:42,119 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T18:54:42,119 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T18:54:42,120 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=d79ba0c344fb,36373,1732301681435}] 2024-11-22T18:54:42,272 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-22T18:54:42,274 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60533, 
version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-22T18:54:42,277 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-22T18:54:42,277 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T18:54:42,279 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d79ba0c344fb%2C36373%2C1732301681435.meta, suffix=.meta, logDir=hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/WALs/d79ba0c344fb,36373,1732301681435, archiveDir=hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/oldWALs, maxLogs=32 2024-11-22T18:54:42,279 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor d79ba0c344fb%2C36373%2C1732301681435.meta.1732301682279.meta 2024-11-22T18:54:42,294 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/WALs/d79ba0c344fb,36373,1732301681435/d79ba0c344fb%2C36373%2C1732301681435.meta.1732301682279.meta 2024-11-22T18:54:42,295 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45613:45613),(127.0.0.1/127.0.0.1:32937:32937)] 2024-11-22T18:54:42,296 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-22T18:54:42,296 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-22T18:54:42,296 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-22T18:54:42,296 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
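The AbstractFSWAL lines above show how the meta WAL file name is assembled from the logged pieces: prefix=d79ba0c344fb%2C36373%2C1732301681435.meta (the server name with commas URL-encoded, plus the provider prefix), a dot, the creation timestamp, and suffix=.meta, giving d79ba0c344fb%2C36373%2C1732301681435.meta.1732301682279.meta. The sketch below rebuilds that name; treating the %2C escapes as plain URL-encoding of the server name is an inference from the log, and the code is illustrative rather than HBase's wal implementation:

    import java.net.URLEncoder;
    import java.nio.charset.StandardCharsets;

    // Rebuilds the meta WAL file name logged above from prefix, timestamp and suffix.
    public class WalNameSketch {
        public static void main(String[] args) {
            String serverName = "d79ba0c344fb,36373,1732301681435";
            String prefix = URLEncoder.encode(serverName, StandardCharsets.UTF_8) + ".meta"; // prefix=..., as logged
            String suffix = ".meta";                                                         // suffix=.meta, as logged
            long creationTs = 1732301682279L;                                                // timestamp in the file name
            // Prints d79ba0c344fb%2C36373%2C1732301681435.meta.1732301682279.meta
            System.out.println(prefix + "." + creationTs + suffix);
        }
    }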
2024-11-22T18:54:42,297 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-22T18:54:42,297 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T18:54:42,297 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-22T18:54:42,297 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-22T18:54:42,298 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T18:54:42,299 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T18:54:42,299 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:54:42,299 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:54:42,299 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T18:54:42,300 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T18:54:42,300 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:54:42,300 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:54:42,300 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T18:54:42,301 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T18:54:42,301 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:54:42,301 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T18:54:42,301 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T18:54:42,302 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T18:54:42,302 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T18:54:42,302 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
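Annotation: each CompactionConfiguration entry above prints the effective compaction settings for one column family of hbase:meta (info, ns, rep_barrier, table). A hedged sketch of the configuration properties those numbers usually map to; the property names below are the standard ones and the values mirror the log (3..10 files per compaction, ratio 1.2, off-peak ratio 5.0, weekly major compactions with 0.5 jitter), not anything read from this cluster's files.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch only: mapping the logged CompactionConfiguration values to their usual properties.
public class CompactionConfSketch {
  public static Configuration compactionConf() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);                // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);               // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);         // ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f); // off-peak ratio
    conf.setLong("hbase.hregion.majorcompaction", 604800000L);    // major period (7 days, in ms)
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);  // major jitter
    return conf;
  }
}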
2024-11-22T18:54:42,303 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T18:54:42,303 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/data/hbase/meta/1588230740 2024-11-22T18:54:42,304 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/data/hbase/meta/1588230740 2024-11-22T18:54:42,305 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T18:54:42,305 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T18:54:42,306 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T18:54:42,307 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T18:54:42,308 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=862964, jitterRate=0.0973162055015564}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T18:54:42,308 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-22T18:54:42,308 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732301682297Writing region info on filesystem at 1732301682297Initializing all the Stores at 1732301682298 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732301682298Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732301682298Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732301682298Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732301682298Cleaning up temporary data from old regions at 1732301682305 (+7 ms)Running coprocessor post-open hooks at 1732301682308 (+3 ms)Region opened successfully at 1732301682308 2024-11-22T18:54:42,309 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732301682272 2024-11-22T18:54:42,312 DEBUG [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-22T18:54:42,312 INFO [RS_OPEN_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-22T18:54:42,312 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=d79ba0c344fb,36373,1732301681435 2024-11-22T18:54:42,313 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as d79ba0c344fb,36373,1732301681435, state=OPEN 2024-11-22T18:54:42,318 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40297-0x1014107dd4e0000, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T18:54:42,318 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36373-0x1014107dd4e0001, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T18:54:42,318 DEBUG [PEWorker-4 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=d79ba0c344fb,36373,1732301681435 2024-11-22T18:54:42,318 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T18:54:42,318 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T18:54:42,320 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-22T18:54:42,320 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=d79ba0c344fb,36373,1732301681435 in 199 msec 2024-11-22T18:54:42,323 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-22T18:54:42,323 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 612 msec 2024-11-22T18:54:42,323 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T18:54:42,323 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-22T18:54:42,325 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T18:54:42,325 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=d79ba0c344fb,36373,1732301681435, seqNum=-1] 2024-11-22T18:54:42,325 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T18:54:42,326 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32793, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T18:54:42,331 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 676 msec 2024-11-22T18:54:42,331 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732301682331, completionTime=-1 2024-11-22T18:54:42,331 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-22T18:54:42,331 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-22T18:54:42,333 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-22T18:54:42,333 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732301742333 2024-11-22T18:54:42,333 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732301802333 2024-11-22T18:54:42,333 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-22T18:54:42,333 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,40297,1732301681357-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T18:54:42,333 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,40297,1732301681357-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T18:54:42,333 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,40297,1732301681357-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T18:54:42,334 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-d79ba0c344fb:40297, period=300000, unit=MILLISECONDS is enabled. 
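Annotation: InitMetaProcedure above creates the two built-in namespaces, 'default' and 'hbase'. A hedged client-side sketch that lists them through the Admin API; the connection boilerplate is illustrative and is not part of this test, only listNamespaceDescriptors() is the point.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch only: list the namespaces a fresh cluster ships with.
public class NamespaceSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
        System.out.println(ns.getName()); // expect "default" and "hbase" on a fresh cluster
      }
    }
  }
}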
2024-11-22T18:54:42,334 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-22T18:54:42,334 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-22T18:54:42,335 DEBUG [master/d79ba0c344fb:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-22T18:54:42,337 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.836sec 2024-11-22T18:54:42,338 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-22T18:54:42,338 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-22T18:54:42,338 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-22T18:54:42,338 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-22T18:54:42,338 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-22T18:54:42,338 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,40297,1732301681357-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T18:54:42,338 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,40297,1732301681357-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-22T18:54:42,340 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-22T18:54:42,340 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-22T18:54:42,340 INFO [master/d79ba0c344fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d79ba0c344fb,40297,1732301681357-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
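Annotation: the repeated "Chore ScheduledChore name=..., period=..., unit=... is enabled." lines above come from HBase's ChoreService scheduling periodic ScheduledChore tasks on the master. A minimal sketch of that pattern, assuming the (name, stopper, period) ScheduledChore constructor; the chore name and the 60-second period are made up for illustration.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

// Sketch only: schedule a periodic chore, then shut the service down.
public class ChoreSketch {
  public static void main(String[] args) {
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService choreService = new ChoreService("demo");
    choreService.scheduleChore(new ScheduledChore("DemoChore", stopper, 60000) {
      @Override protected void chore() {
        // periodic work runs here every 60s until the stopper is stopped
      }
    });
    choreService.shutdown(); // a real server keeps the service alive for its lifetime
  }
}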
2024-11-22T18:54:42,379 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23e03366, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T18:54:42,379 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request d79ba0c344fb,40297,-1 for getting cluster id 2024-11-22T18:54:42,379 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T18:54:42,380 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '2c217b7f-2b07-4a6c-a44c-8ea4ecdebd89' 2024-11-22T18:54:42,381 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T18:54:42,381 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "2c217b7f-2b07-4a6c-a44c-8ea4ecdebd89" 2024-11-22T18:54:42,381 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4da16890, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T18:54:42,381 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [d79ba0c344fb,40297,-1] 2024-11-22T18:54:42,382 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T18:54:42,382 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T18:54:42,383 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51860, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T18:54:42,384 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f4abee, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T18:54:42,384 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T18:54:42,385 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=d79ba0c344fb,36373,1732301681435, seqNum=-1] 2024-11-22T18:54:42,385 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T18:54:42,386 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36982, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T18:54:42,387 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=d79ba0c344fb,40297,1732301681357 2024-11-22T18:54:42,388 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T18:54:42,390 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-22T18:54:42,391 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T18:54:42,393 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/WALs/test.com,8080,1, archiveDir=hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/oldWALs, maxLogs=32 2024-11-22T18:54:42,393 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1732301682393 2024-11-22T18:54:42,398 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/WALs/test.com,8080,1/test.com%2C8080%2C1.1732301682393 2024-11-22T18:54:42,398 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45613:45613),(127.0.0.1/127.0.0.1:32937:32937)] 2024-11-22T18:54:42,399 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1732301682399 2024-11-22T18:54:42,404 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:54:42,404 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:54:42,404 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:54:42,408 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:54:42,408 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:54:42,408 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/WALs/test.com,8080,1/test.com%2C8080%2C1.1732301682393 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/WALs/test.com,8080,1/test.com%2C8080%2C1.1732301682399 2024-11-22T18:54:42,409 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45613:45613),(127.0.0.1/127.0.0.1:32937:32937)] 2024-11-22T18:54:42,409 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/WALs/test.com,8080,1/test.com%2C8080%2C1.1732301682393 is not closed yet, will try archiving it next time 2024-11-22T18:54:42,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42147 is added to blk_1073741835_1011 (size=93) 2024-11-22T18:54:42,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46719 is added to blk_1073741835_1011 (size=93) 2024-11-22T18:54:42,411 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:54:42,411 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:54:42,411 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:54:42,411 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:54:42,411 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:54:42,412 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/WALs/test.com,8080,1/test.com%2C8080%2C1.1732301682393 to hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/oldWALs/test.com%2C8080%2C1.1732301682393 2024-11-22T18:54:42,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42147 is added to blk_1073741836_1012 (size=93) 2024-11-22T18:54:42,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46719 is added to blk_1073741836_1012 (size=93) 2024-11-22T18:54:42,416 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/oldWALs 2024-11-22T18:54:42,416 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1732301682399) 2024-11-22T18:54:42,416 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-22T18:54:42,416 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-22T18:54:42,416 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at 
org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T18:54:42,416 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T18:54:42,416 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T18:54:42,416 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-22T18:54:42,416 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-22T18:54:42,416 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=852746234, stopped=false 2024-11-22T18:54:42,416 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=d79ba0c344fb,40297,1732301681357 2024-11-22T18:54:42,422 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40297-0x1014107dd4e0000, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T18:54:42,422 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36373-0x1014107dd4e0001, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T18:54:42,422 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40297-0x1014107dd4e0000, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:54:42,422 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36373-0x1014107dd4e0001, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:54:42,422 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T18:54:42,422 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
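Annotation: the call stack above shows this test's teardown path, AbstractTestLogRolling.tearDown invoking HBaseTestingUtil.shutdownMiniCluster. A rough JUnit 4 sketch of that setup/teardown lifecycle; the class name and the single-regionserver argument are assumptions for illustration, only shutdownMiniCluster() is taken directly from the trace.

import org.junit.After;
import org.junit.Before;
import org.apache.hadoop.hbase.HBaseTestingUtil;

// Sketch only: start a minicluster before each test and shut it down afterwards.
public class MiniClusterLifecycleSketch {
  private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    testUtil.startMiniCluster(1); // one master + one regionserver, as in this log
  }

  @After
  public void tearDown() throws Exception {
    testUtil.shutdownMiniCluster(); // the call visible in the stack trace above
  }
}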
2024-11-22T18:54:42,422 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T18:54:42,422 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T18:54:42,423 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:36373-0x1014107dd4e0001, quorum=127.0.0.1:53097, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T18:54:42,423 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'd79ba0c344fb,36373,1732301681435' ***** 2024-11-22T18:54:42,423 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-22T18:54:42,423 INFO [RS:0;d79ba0c344fb:36373 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-22T18:54:42,423 INFO [RS:0;d79ba0c344fb:36373 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-22T18:54:42,423 INFO [RS:0;d79ba0c344fb:36373 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-22T18:54:42,423 INFO [RS:0;d79ba0c344fb:36373 {}] regionserver.HRegionServer(959): stopping server d79ba0c344fb,36373,1732301681435 2024-11-22T18:54:42,423 INFO [RS:0;d79ba0c344fb:36373 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T18:54:42,423 INFO [RS:0;d79ba0c344fb:36373 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;d79ba0c344fb:36373. 2024-11-22T18:54:42,423 DEBUG [RS:0;d79ba0c344fb:36373 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T18:54:42,423 DEBUG [RS:0;d79ba0c344fb:36373 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T18:54:42,424 INFO [RS:0;d79ba0c344fb:36373 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-22T18:54:42,424 INFO [RS:0;d79ba0c344fb:36373 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-22T18:54:42,424 INFO [RS:0;d79ba0c344fb:36373 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-22T18:54:42,424 INFO [RS:0;d79ba0c344fb:36373 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-22T18:54:42,424 INFO [RS:0;d79ba0c344fb:36373 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-22T18:54:42,424 DEBUG [RS:0;d79ba0c344fb:36373 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-22T18:54:42,424 DEBUG [RS:0;d79ba0c344fb:36373 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-22T18:54:42,424 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T18:54:42,424 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-22T18:54:42,424 INFO [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T18:54:42,424 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T18:54:42,424 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T18:54:42,424 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T18:54:42,424 INFO [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-22T18:54:42,425 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40297-0x1014107dd4e0000, quorum=127.0.0.1:53097, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T18:54:42,445 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/data/hbase/meta/1588230740/.tmp/ns/ceb8105c5a9447bbba11af0be37a9c51 is 43, key is default/ns:d/1732301682327/Put/seqid=0 2024-11-22T18:54:42,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42147 is added to blk_1073741837_1013 (size=5153) 2024-11-22T18:54:42,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46719 is added to blk_1073741837_1013 (size=5153) 2024-11-22T18:54:42,451 INFO [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/data/hbase/meta/1588230740/.tmp/ns/ceb8105c5a9447bbba11af0be37a9c51 2024-11-22T18:54:42,451 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,42437,1732301479035/d79ba0c344fb%2C42437%2C1732301479035.meta.1732301479898.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:42,451 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44035/user/jenkins/test-data/821b3491-b558-189d-7f09-7f8286ca5e1e/WALs/d79ba0c344fb,35983,1732301480111/d79ba0c344fb%2C35983%2C1732301480111.1732301480390 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T18:54:42,456 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/data/hbase/meta/1588230740/.tmp/ns/ceb8105c5a9447bbba11af0be37a9c51 as hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/data/hbase/meta/1588230740/ns/ceb8105c5a9447bbba11af0be37a9c51 2024-11-22T18:54:42,461 INFO [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/data/hbase/meta/1588230740/ns/ceb8105c5a9447bbba11af0be37a9c51, entries=2, sequenceid=6, filesize=5.0 K 2024-11-22T18:54:42,462 INFO [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 38ms, sequenceid=6, compaction requested=false 2024-11-22T18:54:42,466 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-22T18:54:42,467 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T18:54:42,467 INFO [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T18:54:42,467 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732301682424Running coprocessor pre-close hooks at 1732301682424Disabling compacts and flushes for region at 1732301682424Disabling writes for close at 1732301682424Obtaining lock to block concurrent updates at 1732301682424Preparing flush snapshotting stores in 1588230740 at 1732301682424Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732301682425 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732301682425Flushing 1588230740/ns: creating writer at 1732301682426 (+1 ms)Flushing 1588230740/ns: appending metadata at 1732301682445 (+19 ms)Flushing 1588230740/ns: closing flushed file at 1732301682445Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@29c439b9: reopening flushed file at 1732301682455 (+10 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 38ms, sequenceid=6, compaction 
requested=false at 1732301682462 (+7 ms)Writing region close event to WAL at 1732301682463 (+1 ms)Running coprocessor post-close hooks at 1732301682467 (+4 ms)Closed at 1732301682467 2024-11-22T18:54:42,467 DEBUG [RS_CLOSE_META-regionserver/d79ba0c344fb:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-22T18:54:42,624 INFO [RS:0;d79ba0c344fb:36373 {}] regionserver.HRegionServer(976): stopping server d79ba0c344fb,36373,1732301681435; all regions closed. 2024-11-22T18:54:42,625 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:54:42,625 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:54:42,625 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:54:42,625 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:54:42,625 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:54:42,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42147 is added to blk_1073741834_1010 (size=1152) 2024-11-22T18:54:42,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46719 is added to blk_1073741834_1010 (size=1152) 2024-11-22T18:54:42,629 DEBUG [RS:0;d79ba0c344fb:36373 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/oldWALs 2024-11-22T18:54:42,629 INFO [RS:0;d79ba0c344fb:36373 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog d79ba0c344fb%2C36373%2C1732301681435.meta:.meta(num 1732301682279) 2024-11-22T18:54:42,630 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:54:42,630 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:54:42,630 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:54:42,630 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:54:42,630 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:54:42,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46719 is added to blk_1073741833_1009 (size=93) 2024-11-22T18:54:42,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42147 is added to blk_1073741833_1009 (size=93) 2024-11-22T18:54:42,633 DEBUG [RS:0;d79ba0c344fb:36373 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/oldWALs 2024-11-22T18:54:42,633 INFO [RS:0;d79ba0c344fb:36373 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog d79ba0c344fb%2C36373%2C1732301681435:(num 1732301681858) 2024-11-22T18:54:42,634 DEBUG [RS:0;d79ba0c344fb:36373 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T18:54:42,634 INFO [RS:0;d79ba0c344fb:36373 {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T18:54:42,634 INFO [RS:0;d79ba0c344fb:36373 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T18:54:42,634 INFO [RS:0;d79ba0c344fb:36373 {}] hbase.ChoreService(370): Chore service for: regionserver/d79ba0c344fb:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-22T18:54:42,634 INFO [RS:0;d79ba0c344fb:36373 {}] hbase.HBaseServerBase(448): Shutdown executor service 
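Annotation: earlier in this log a standalone WAL with prefix test.com%2C8080%2C1 was created (18:54:42,393), rolled (18:54:42,399), its pre-roll file archived to oldWALs, and finally closed; the entries just above show the regionserver's own WALs being closed and archived the same way on shutdown. A hedged sketch of that lifecycle against the public WAL API; the signatures are assumed from current HBase, and the factory id, table name and region are illustrative rather than the test's actual code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALFactory;

// Sketch only: create a WAL, roll it, then close the factory.
public class WalRollSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    WALFactory walFactory = new WALFactory(conf, "test");  // factory id is illustrative
    RegionInfo region = RegionInfoBuilder.newBuilder(TableName.valueOf("test")).build();
    WAL wal = walFactory.getWAL(region);  // obtains (and lazily creates) the WAL
    wal.rollWriter();                     // starts a new WAL file; the old one becomes eligible for archiving
    walFactory.close();                   // closes the WAL; finished files end up under oldWALs
  }
}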
2024-11-22T18:54:42,634 INFO [regionserver/d79ba0c344fb:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-22T18:54:42,634 INFO [RS:0;d79ba0c344fb:36373 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36373 2024-11-22T18:54:42,636 INFO [RS:0;d79ba0c344fb:36373 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T18:54:42,636 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36373-0x1014107dd4e0001, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/d79ba0c344fb,36373,1732301681435 2024-11-22T18:54:42,636 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40297-0x1014107dd4e0000, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T18:54:42,638 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [d79ba0c344fb,36373,1732301681435] 2024-11-22T18:54:42,639 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/d79ba0c344fb,36373,1732301681435 already deleted, retry=false 2024-11-22T18:54:42,639 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; d79ba0c344fb,36373,1732301681435 expired; onlineServers=0 2024-11-22T18:54:42,639 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'd79ba0c344fb,40297,1732301681357' ***** 2024-11-22T18:54:42,639 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-22T18:54:42,639 INFO [M:0;d79ba0c344fb:40297 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T18:54:42,639 INFO [M:0;d79ba0c344fb:40297 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T18:54:42,639 DEBUG [M:0;d79ba0c344fb:40297 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-22T18:54:42,639 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-22T18:54:42,639 DEBUG [M:0;d79ba0c344fb:40297 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-22T18:54:42,639 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster-HFileCleaner.small.0-1732301681659 {}] cleaner.HFileCleaner(306): Exit Thread[master/d79ba0c344fb:0:becomeActiveMaster-HFileCleaner.small.0-1732301681659,5,FailOnTimeoutGroup] 2024-11-22T18:54:42,639 DEBUG [master/d79ba0c344fb:0:becomeActiveMaster-HFileCleaner.large.0-1732301681659 {}] cleaner.HFileCleaner(306): Exit Thread[master/d79ba0c344fb:0:becomeActiveMaster-HFileCleaner.large.0-1732301681659,5,FailOnTimeoutGroup] 2024-11-22T18:54:42,640 INFO [M:0;d79ba0c344fb:40297 {}] hbase.ChoreService(370): Chore service for: master/d79ba0c344fb:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-22T18:54:42,640 INFO [M:0;d79ba0c344fb:40297 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T18:54:42,640 DEBUG [M:0;d79ba0c344fb:40297 {}] master.HMaster(1795): Stopping service threads 2024-11-22T18:54:42,640 INFO [M:0;d79ba0c344fb:40297 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-22T18:54:42,640 INFO [M:0;d79ba0c344fb:40297 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T18:54:42,640 INFO [M:0;d79ba0c344fb:40297 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-22T18:54:42,640 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-22T18:54:42,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40297-0x1014107dd4e0000, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-22T18:54:42,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40297-0x1014107dd4e0000, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T18:54:42,643 DEBUG [M:0;d79ba0c344fb:40297 {}] zookeeper.ZKUtil(347): master:40297-0x1014107dd4e0000, quorum=127.0.0.1:53097, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-22T18:54:42,643 WARN [M:0;d79ba0c344fb:40297 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-22T18:54:42,644 INFO [M:0;d79ba0c344fb:40297 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/.lastflushedseqids 2024-11-22T18:54:42,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46719 is added to blk_1073741838_1014 (size=99) 2024-11-22T18:54:42,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42147 is added to blk_1073741838_1014 (size=99) 2024-11-22T18:54:42,649 INFO [M:0;d79ba0c344fb:40297 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-22T18:54:42,649 INFO [M:0;d79ba0c344fb:40297 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-22T18:54:42,650 DEBUG [M:0;d79ba0c344fb:40297 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T18:54:42,650 INFO [M:0;d79ba0c344fb:40297 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T18:54:42,650 DEBUG [M:0;d79ba0c344fb:40297 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T18:54:42,650 DEBUG [M:0;d79ba0c344fb:40297 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T18:54:42,650 DEBUG [M:0;d79ba0c344fb:40297 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T18:54:42,650 INFO [M:0;d79ba0c344fb:40297 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-22T18:54:42,669 DEBUG [M:0;d79ba0c344fb:40297 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ab45ec6c73594fb688fb8c61c14e37c0 is 82, key is hbase:meta,,1/info:regioninfo/1732301682312/Put/seqid=0 2024-11-22T18:54:42,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42147 is added to blk_1073741839_1015 (size=5672) 2024-11-22T18:54:42,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46719 is added to blk_1073741839_1015 (size=5672) 2024-11-22T18:54:42,675 INFO [M:0;d79ba0c344fb:40297 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ab45ec6c73594fb688fb8c61c14e37c0 2024-11-22T18:54:42,698 DEBUG [M:0;d79ba0c344fb:40297 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/21204673cfc141fabc92bcc9aff2fe00 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732301682330/Put/seqid=0 2024-11-22T18:54:42,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42147 is added to blk_1073741840_1016 (size=5275) 2024-11-22T18:54:42,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46719 is added to blk_1073741840_1016 (size=5275) 2024-11-22T18:54:42,703 INFO [M:0;d79ba0c344fb:40297 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/21204673cfc141fabc92bcc9aff2fe00 2024-11-22T18:54:42,723 DEBUG [M:0;d79ba0c344fb:40297 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/811ed16900d14c8d9672c0226ce7c970 is 69, key is d79ba0c344fb,36373,1732301681435/rs:state/1732301681707/Put/seqid=0 2024-11-22T18:54:42,730 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46719 is added to blk_1073741841_1017 (size=5156) 2024-11-22T18:54:42,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42147 is added to blk_1073741841_1017 (size=5156) 2024-11-22T18:54:42,731 INFO [M:0;d79ba0c344fb:40297 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/811ed16900d14c8d9672c0226ce7c970 2024-11-22T18:54:42,738 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36373-0x1014107dd4e0001, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T18:54:42,738 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36373-0x1014107dd4e0001, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T18:54:42,738 INFO [RS:0;d79ba0c344fb:36373 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T18:54:42,738 INFO [RS:0;d79ba0c344fb:36373 {}] regionserver.HRegionServer(1031): Exiting; stopping=d79ba0c344fb,36373,1732301681435; zookeeper connection closed. 2024-11-22T18:54:42,738 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@69488e5c {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@69488e5c 2024-11-22T18:54:42,738 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-22T18:54:42,757 DEBUG [M:0;d79ba0c344fb:40297 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/717aaa1dd23a47f5b738bef16effa82d is 52, key is load_balancer_on/state:d/1732301682389/Put/seqid=0 2024-11-22T18:54:42,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46719 is added to blk_1073741842_1018 (size=5056) 2024-11-22T18:54:42,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42147 is added to blk_1073741842_1018 (size=5056) 2024-11-22T18:54:42,762 INFO [M:0;d79ba0c344fb:40297 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/717aaa1dd23a47f5b738bef16effa82d 2024-11-22T18:54:42,767 DEBUG [M:0;d79ba0c344fb:40297 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ab45ec6c73594fb688fb8c61c14e37c0 as hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ab45ec6c73594fb688fb8c61c14e37c0 2024-11-22T18:54:42,771 INFO [M:0;d79ba0c344fb:40297 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ab45ec6c73594fb688fb8c61c14e37c0, entries=8, sequenceid=29, filesize=5.5 K 2024-11-22T18:54:42,771 DEBUG [M:0;d79ba0c344fb:40297 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/21204673cfc141fabc92bcc9aff2fe00 as hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/21204673cfc141fabc92bcc9aff2fe00 2024-11-22T18:54:42,776 INFO [M:0;d79ba0c344fb:40297 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/21204673cfc141fabc92bcc9aff2fe00, entries=3, sequenceid=29, filesize=5.2 K 2024-11-22T18:54:42,781 DEBUG [M:0;d79ba0c344fb:40297 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/811ed16900d14c8d9672c0226ce7c970 as hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/811ed16900d14c8d9672c0226ce7c970 2024-11-22T18:54:42,785 INFO [M:0;d79ba0c344fb:40297 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/811ed16900d14c8d9672c0226ce7c970, entries=1, sequenceid=29, filesize=5.0 K 2024-11-22T18:54:42,786 DEBUG [M:0;d79ba0c344fb:40297 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/717aaa1dd23a47f5b738bef16effa82d as hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/717aaa1dd23a47f5b738bef16effa82d 2024-11-22T18:54:42,790 INFO [M:0;d79ba0c344fb:40297 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37507/user/jenkins/test-data/de0631cb-8c1b-07b8-d558-c7ec20985f5a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/717aaa1dd23a47f5b738bef16effa82d, entries=1, sequenceid=29, filesize=4.9 K 2024-11-22T18:54:42,791 INFO [M:0;d79ba0c344fb:40297 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 141ms, sequenceid=29, compaction requested=false 2024-11-22T18:54:42,793 INFO [M:0;d79ba0c344fb:40297 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-22T18:54:42,793 DEBUG [M:0;d79ba0c344fb:40297 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732301682650Disabling compacts and flushes for region at 1732301682650Disabling writes for close at 1732301682650Obtaining lock to block concurrent updates at 1732301682650Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732301682650Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1732301682650Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732301682651 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732301682651Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732301682668 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732301682668Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732301682680 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732301682698 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732301682698Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732301682707 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732301682723 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732301682723Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732301682735 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732301682756 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732301682756Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@52898d3a: reopening flushed file at 1732301682766 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1bd9a67c: reopening flushed file at 1732301682771 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@def5ba8: reopening flushed file at 1732301682776 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@64d1cc38: reopening flushed file at 1732301682785 (+9 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 141ms, sequenceid=29, compaction requested=false at 1732301682791 (+6 ms)Writing region close event to WAL at 1732301682793 (+2 ms)Closed at 1732301682793 2024-11-22T18:54:42,793 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:54:42,793 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:54:42,793 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:54:42,794 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:54:42,794 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T18:54:42,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46719 is added to blk_1073741830_1006 (size=10311) 2024-11-22T18:54:42,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42147 is added to blk_1073741830_1006 (size=10311) 2024-11-22T18:54:42,796 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-22T18:54:42,796 INFO [M:0;d79ba0c344fb:40297 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-22T18:54:42,796 INFO [M:0;d79ba0c344fb:40297 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40297 2024-11-22T18:54:42,797 INFO [M:0;d79ba0c344fb:40297 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T18:54:42,900 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40297-0x1014107dd4e0000, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T18:54:42,900 INFO [M:0;d79ba0c344fb:40297 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T18:54:42,900 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40297-0x1014107dd4e0000, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T18:54:42,902 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@380ffe40{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T18:54:42,903 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4e35321a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T18:54:42,903 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T18:54:42,903 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1926aa54{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T18:54:42,903 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2b825638{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45c06bc-d634-55a5-8db6-8b5158c5f157/hadoop.log.dir/,STOPPED} 2024-11-22T18:54:42,904 WARN [BP-1434225187-172.17.0.2-1732301680564 heartbeating to localhost/127.0.0.1:37507 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T18:54:42,904 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T18:54:42,904 WARN [BP-1434225187-172.17.0.2-1732301680564 heartbeating to localhost/127.0.0.1:37507 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1434225187-172.17.0.2-1732301680564 (Datanode Uuid e6f51d81-1bd8-4489-8468-37e778d978d3) service to localhost/127.0.0.1:37507 2024-11-22T18:54:42,904 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T18:54:42,905 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45c06bc-d634-55a5-8db6-8b5158c5f157/cluster_f3233d8e-dcc9-5c97-be54-7de882edd310/data/data3/current/BP-1434225187-172.17.0.2-1732301680564 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T18:54:42,905 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45c06bc-d634-55a5-8db6-8b5158c5f157/cluster_f3233d8e-dcc9-5c97-be54-7de882edd310/data/data4/current/BP-1434225187-172.17.0.2-1732301680564 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T18:54:42,905 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T18:54:42,907 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@150dab73{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T18:54:42,908 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6486a7e6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T18:54:42,908 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T18:54:42,908 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@10c1adfc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T18:54:42,908 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5be97557{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45c06bc-d634-55a5-8db6-8b5158c5f157/hadoop.log.dir/,STOPPED} 2024-11-22T18:54:42,909 WARN [BP-1434225187-172.17.0.2-1732301680564 heartbeating to localhost/127.0.0.1:37507 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T18:54:42,909 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T18:54:42,909 WARN [BP-1434225187-172.17.0.2-1732301680564 heartbeating to localhost/127.0.0.1:37507 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1434225187-172.17.0.2-1732301680564 (Datanode Uuid c255f87f-5ee4-4f1a-9c1a-78503d585de7) service to localhost/127.0.0.1:37507 2024-11-22T18:54:42,909 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T18:54:42,910 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45c06bc-d634-55a5-8db6-8b5158c5f157/cluster_f3233d8e-dcc9-5c97-be54-7de882edd310/data/data1/current/BP-1434225187-172.17.0.2-1732301680564 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T18:54:42,910 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45c06bc-d634-55a5-8db6-8b5158c5f157/cluster_f3233d8e-dcc9-5c97-be54-7de882edd310/data/data2/current/BP-1434225187-172.17.0.2-1732301680564 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T18:54:42,910 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T18:54:42,916 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4bedee20{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T18:54:42,916 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@27862096{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T18:54:42,916 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T18:54:42,916 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@412c5a2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T18:54:42,917 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@fee3652{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45c06bc-d634-55a5-8db6-8b5158c5f157/hadoop.log.dir/,STOPPED} 2024-11-22T18:54:42,923 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-22T18:54:42,939 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-22T18:54:42,949 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=269 (was 227) Potentially hanging thread: nioEventLoopGroup-45-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:37507 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37507 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37507 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37507 from jenkins 
java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-42-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: globalEventExecutor-1-21 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//io.netty.util.concurrent.GlobalEventExecutor.takeTask(GlobalEventExecutor.java:113) app//io.netty.util.concurrent.GlobalEventExecutor$TaskRunner.run(GlobalEventExecutor.java:259) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37507 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37507 from jenkins.hfs.7 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-43-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37507 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) 
app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:37507 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-43-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=534 (was 503) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=149 (was 149), ProcessCount=11 (was 11), AvailableMemoryMB=7066 (was 7081)