2024-11-11 20:43:31,700 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-11 20:43:31,712 main DEBUG Took 0.010396 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-11 20:43:31,713 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-11 20:43:31,714 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-11 20:43:31,715 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-11 20:43:31,716 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 20:43:31,723 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-11 20:43:31,735 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 20:43:31,736 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 20:43:31,737 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 20:43:31,738 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 20:43:31,738 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 20:43:31,739 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 20:43:31,739 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 20:43:31,740 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 20:43:31,740 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 20:43:31,741 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 20:43:31,742 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 20:43:31,742 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 20:43:31,743 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 20:43:31,743 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-11 20:43:31,743 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 20:43:31,744 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 20:43:31,744 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 20:43:31,745 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 20:43:31,745 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 20:43:31,745 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 20:43:31,746 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 20:43:31,746 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 20:43:31,747 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 20:43:31,747 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 20:43:31,747 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 20:43:31,748 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-11 20:43:31,749 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 20:43:31,751 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-11 20:43:31,753 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-11 20:43:31,754 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-11 20:43:31,755 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-11 20:43:31,755 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-11 20:43:31,764 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-11 20:43:31,767 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-11 20:43:31,768 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-11 20:43:31,769 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-11 20:43:31,769 main DEBUG createAppenders(={Console}) 2024-11-11 20:43:31,770 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-11 20:43:31,771 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-11 20:43:31,771 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-11 20:43:31,772 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-11 20:43:31,772 main DEBUG OutputStream closed 2024-11-11 20:43:31,772 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-11 20:43:31,772 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-11 20:43:31,773 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-11 20:43:31,847 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-11 20:43:31,849 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-11 20:43:31,851 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-11 20:43:31,852 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-11 20:43:31,852 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-11 20:43:31,853 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-11 20:43:31,853 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-11 20:43:31,854 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-11 20:43:31,854 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-11 20:43:31,854 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-11 20:43:31,855 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-11 20:43:31,855 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-11 20:43:31,855 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-11 20:43:31,856 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-11 20:43:31,856 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-11 20:43:31,856 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-11 20:43:31,857 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-11 20:43:31,858 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-11 20:43:31,860 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-11 20:43:31,860 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-11 20:43:31,861 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-11 20:43:31,861 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-11T20:43:32,101 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cf855f3-5a89-7dc7-3541-2e0c179ae50d 2024-11-11 20:43:32,105 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-11 20:43:32,105 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
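The block above is Log4j 2 building the test logging configuration (the per-package loggers plus the Console HBaseTestAppender) from the log4j2.properties bundled in the hbase-logging tests jar. Below is a minimal sketch, assuming Log4j 2 core on the classpath as the log indicates, of how the resulting logger levels (e.g. DEBUG for org.apache.hadoop.hbase, ERROR for org.apache.zookeeper) could be dumped for verification; the class name DumpLoggerLevels is illustrative only and not part of the test.

import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.core.LoggerContext;
import org.apache.logging.log4j.core.config.Configuration;

public class DumpLoggerLevels {
  public static void main(String[] args) {
    // Grab the active core LoggerContext and its configuration
    // (the PropertiesConfiguration instance reported in the log above).
    LoggerContext ctx = (LoggerContext) LogManager.getContext(false);
    Configuration cfg = ctx.getConfiguration();

    // Print every configured logger and its level.
    cfg.getLoggers().forEach((name, loggerConfig) ->
        System.out.println(name + " -> " + loggerConfig.getLevel()));

    // Spot-check one of the levels shown in the startup log.
    Level hbaseLevel = cfg.getLoggerConfig("org.apache.hadoop.hbase").getLevel();
    System.out.println("org.apache.hadoop.hbase -> " + hbaseLevel);
  }
}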
2024-11-11T20:43:32,115 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-11T20:43:32,152 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=278, ProcessCount=11, AvailableMemoryMB=5924 2024-11-11T20:43:32,156 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-11T20:43:32,178 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cf855f3-5a89-7dc7-3541-2e0c179ae50d/cluster_9b6cc4bb-4f3e-38bf-dbe5-5cf01a4f98fe, deleteOnExit=true 2024-11-11T20:43:32,179 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-11T20:43:32,180 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cf855f3-5a89-7dc7-3541-2e0c179ae50d/test.cache.data in system properties and HBase conf 2024-11-11T20:43:32,181 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cf855f3-5a89-7dc7-3541-2e0c179ae50d/hadoop.tmp.dir in system properties and HBase conf 2024-11-11T20:43:32,182 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cf855f3-5a89-7dc7-3541-2e0c179ae50d/hadoop.log.dir in system properties and HBase conf 2024-11-11T20:43:32,183 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cf855f3-5a89-7dc7-3541-2e0c179ae50d/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-11T20:43:32,184 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cf855f3-5a89-7dc7-3541-2e0c179ae50d/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-11T20:43:32,184 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-11T20:43:32,309 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-11T20:43:32,432 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-11T20:43:32,438 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cf855f3-5a89-7dc7-3541-2e0c179ae50d/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-11T20:43:32,439 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cf855f3-5a89-7dc7-3541-2e0c179ae50d/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-11T20:43:32,439 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cf855f3-5a89-7dc7-3541-2e0c179ae50d/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-11T20:43:32,440 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cf855f3-5a89-7dc7-3541-2e0c179ae50d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T20:43:32,441 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cf855f3-5a89-7dc7-3541-2e0c179ae50d/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-11T20:43:32,441 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cf855f3-5a89-7dc7-3541-2e0c179ae50d/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-11T20:43:32,442 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cf855f3-5a89-7dc7-3541-2e0c179ae50d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T20:43:32,442 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cf855f3-5a89-7dc7-3541-2e0c179ae50d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T20:43:32,443 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cf855f3-5a89-7dc7-3541-2e0c179ae50d/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-11T20:43:32,443 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cf855f3-5a89-7dc7-3541-2e0c179ae50d/nfs.dump.dir in system properties and HBase conf 2024-11-11T20:43:32,444 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cf855f3-5a89-7dc7-3541-2e0c179ae50d/java.io.tmpdir in system properties and HBase conf 2024-11-11T20:43:32,444 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cf855f3-5a89-7dc7-3541-2e0c179ae50d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T20:43:32,445 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cf855f3-5a89-7dc7-3541-2e0c179ae50d/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-11T20:43:32,445 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cf855f3-5a89-7dc7-3541-2e0c179ae50d/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-11T20:43:33,009 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-11T20:43:33,375 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-11T20:43:33,465 INFO [Time-limited test {}] log.Log(170): Logging initialized @2688ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-11T20:43:33,547 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T20:43:33,621 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T20:43:33,648 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T20:43:33,649 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T20:43:33,650 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-11T20:43:33,664 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T20:43:33,668 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cf855f3-5a89-7dc7-3541-2e0c179ae50d/hadoop.log.dir/,AVAILABLE} 2024-11-11T20:43:33,670 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T20:43:33,888 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cf855f3-5a89-7dc7-3541-2e0c179ae50d/java.io.tmpdir/jetty-localhost-32981-hadoop-hdfs-3_4_1-tests_jar-_-any-11581129257618213762/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T20:43:33,901 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:32981} 2024-11-11T20:43:33,901 INFO [Time-limited test {}] server.Server(415): Started @3126ms 2024-11-11T20:43:33,936 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-11T20:43:34,342 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T20:43:34,353 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T20:43:34,359 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T20:43:34,359 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T20:43:34,360 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T20:43:34,361 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cf855f3-5a89-7dc7-3541-2e0c179ae50d/hadoop.log.dir/,AVAILABLE} 2024-11-11T20:43:34,362 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T20:43:34,487 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b07d1ba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cf855f3-5a89-7dc7-3541-2e0c179ae50d/java.io.tmpdir/jetty-localhost-42357-hadoop-hdfs-3_4_1-tests_jar-_-any-14515803091332240265/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T20:43:34,488 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:42357} 2024-11-11T20:43:34,488 INFO [Time-limited test {}] server.Server(415): Started @3712ms 2024-11-11T20:43:34,553 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T20:43:34,701 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T20:43:34,708 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T20:43:34,713 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T20:43:34,714 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T20:43:34,714 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T20:43:34,715 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cf855f3-5a89-7dc7-3541-2e0c179ae50d/hadoop.log.dir/,AVAILABLE} 2024-11-11T20:43:34,716 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T20:43:34,872 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bf97579{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cf855f3-5a89-7dc7-3541-2e0c179ae50d/java.io.tmpdir/jetty-localhost-44887-hadoop-hdfs-3_4_1-tests_jar-_-any-4589763173969749613/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T20:43:34,877 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:44887} 2024-11-11T20:43:34,877 INFO [Time-limited test {}] server.Server(415): Started @4102ms 2024-11-11T20:43:34,880 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
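Up to this point the log shows HBaseTestingUtil bringing up the mini cluster described by StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1}: the NameNode web UI and two DataNode Jetty servers have started. Below is a minimal sketch (not the TestLogRolling source itself, and assuming the builder methods mirror the option fields printed above) of how a test requests that cluster shape.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();

    // 1 master, 1 region server, 2 data nodes, 1 ZooKeeper server,
    // matching the StartMiniClusterOption logged at startup.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();

    util.startMiniCluster(option);  // starts HDFS, ZooKeeper and HBase as in the log
    try {
      // ... exercise the cluster, e.g. via util.getConnection() ...
    } finally {
      util.shutdownMiniCluster();
    }
  }
}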
2024-11-11T20:43:35,058 WARN [Thread-95 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cf855f3-5a89-7dc7-3541-2e0c179ae50d/cluster_9b6cc4bb-4f3e-38bf-dbe5-5cf01a4f98fe/data/data1/current/BP-2014057948-172.17.0.2-1731357813118/current, will proceed with Du for space computation calculation, 2024-11-11T20:43:35,061 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cf855f3-5a89-7dc7-3541-2e0c179ae50d/cluster_9b6cc4bb-4f3e-38bf-dbe5-5cf01a4f98fe/data/data2/current/BP-2014057948-172.17.0.2-1731357813118/current, will proceed with Du for space computation calculation, 2024-11-11T20:43:35,067 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cf855f3-5a89-7dc7-3541-2e0c179ae50d/cluster_9b6cc4bb-4f3e-38bf-dbe5-5cf01a4f98fe/data/data4/current/BP-2014057948-172.17.0.2-1731357813118/current, will proceed with Du for space computation calculation, 2024-11-11T20:43:35,068 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cf855f3-5a89-7dc7-3541-2e0c179ae50d/cluster_9b6cc4bb-4f3e-38bf-dbe5-5cf01a4f98fe/data/data3/current/BP-2014057948-172.17.0.2-1731357813118/current, will proceed with Du for space computation calculation, 2024-11-11T20:43:35,144 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-11T20:43:35,165 WARN [Thread-83 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T20:43:35,241 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe20510ca6a366f8b with lease ID 0x675b31de29235eff: Processing first storage report for DS-d17006ea-ccdb-403c-ba2a-171fcf622c2a from datanode DatanodeRegistration(127.0.0.1:34943, datanodeUuid=820ed1f1-7b25-47ed-9c76-4fffdf22c9c0, infoPort=35551, infoSecurePort=0, ipcPort=38773, storageInfo=lv=-57;cid=testClusterID;nsid=1217167724;c=1731357813118) 2024-11-11T20:43:35,243 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe20510ca6a366f8b with lease ID 0x675b31de29235eff: from storage DS-d17006ea-ccdb-403c-ba2a-171fcf622c2a node DatanodeRegistration(127.0.0.1:34943, datanodeUuid=820ed1f1-7b25-47ed-9c76-4fffdf22c9c0, infoPort=35551, infoSecurePort=0, ipcPort=38773, storageInfo=lv=-57;cid=testClusterID;nsid=1217167724;c=1731357813118), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-11T20:43:35,243 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7223340278517c32 with lease ID 0x675b31de29235f00: Processing first storage report for DS-e0d6e228-645c-44f5-b9f9-97024982914f from datanode DatanodeRegistration(127.0.0.1:36315, datanodeUuid=f5687c13-e644-47f6-9438-ade2898dfaff, infoPort=44869, infoSecurePort=0, ipcPort=38793, storageInfo=lv=-57;cid=testClusterID;nsid=1217167724;c=1731357813118) 2024-11-11T20:43:35,244 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7223340278517c32 with lease ID 0x675b31de29235f00: from storage DS-e0d6e228-645c-44f5-b9f9-97024982914f node DatanodeRegistration(127.0.0.1:36315, datanodeUuid=f5687c13-e644-47f6-9438-ade2898dfaff, infoPort=44869, infoSecurePort=0, ipcPort=38793, storageInfo=lv=-57;cid=testClusterID;nsid=1217167724;c=1731357813118), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T20:43:35,244 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe20510ca6a366f8b with lease ID 0x675b31de29235eff: Processing first storage report for DS-293c34dc-d6b8-4c4e-bade-bd5339f06555 from datanode DatanodeRegistration(127.0.0.1:34943, datanodeUuid=820ed1f1-7b25-47ed-9c76-4fffdf22c9c0, infoPort=35551, infoSecurePort=0, ipcPort=38773, storageInfo=lv=-57;cid=testClusterID;nsid=1217167724;c=1731357813118) 2024-11-11T20:43:35,244 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe20510ca6a366f8b with lease ID 0x675b31de29235eff: from storage DS-293c34dc-d6b8-4c4e-bade-bd5339f06555 node DatanodeRegistration(127.0.0.1:34943, datanodeUuid=820ed1f1-7b25-47ed-9c76-4fffdf22c9c0, infoPort=35551, infoSecurePort=0, ipcPort=38773, storageInfo=lv=-57;cid=testClusterID;nsid=1217167724;c=1731357813118), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T20:43:35,245 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7223340278517c32 with lease ID 0x675b31de29235f00: Processing first storage report for DS-1e576144-e1d2-4529-bcef-0f42c2e9fa7d from datanode DatanodeRegistration(127.0.0.1:36315, datanodeUuid=f5687c13-e644-47f6-9438-ade2898dfaff, infoPort=44869, infoSecurePort=0, ipcPort=38793, storageInfo=lv=-57;cid=testClusterID;nsid=1217167724;c=1731357813118) 2024-11-11T20:43:35,245 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x7223340278517c32 with lease ID 0x675b31de29235f00: from storage DS-1e576144-e1d2-4529-bcef-0f42c2e9fa7d node DatanodeRegistration(127.0.0.1:36315, datanodeUuid=f5687c13-e644-47f6-9438-ade2898dfaff, infoPort=44869, infoSecurePort=0, ipcPort=38793, storageInfo=lv=-57;cid=testClusterID;nsid=1217167724;c=1731357813118), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T20:43:35,361 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cf855f3-5a89-7dc7-3541-2e0c179ae50d 2024-11-11T20:43:35,461 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cf855f3-5a89-7dc7-3541-2e0c179ae50d/cluster_9b6cc4bb-4f3e-38bf-dbe5-5cf01a4f98fe/zookeeper_0, clientPort=58576, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cf855f3-5a89-7dc7-3541-2e0c179ae50d/cluster_9b6cc4bb-4f3e-38bf-dbe5-5cf01a4f98fe/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cf855f3-5a89-7dc7-3541-2e0c179ae50d/cluster_9b6cc4bb-4f3e-38bf-dbe5-5cf01a4f98fe/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-11T20:43:35,473 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58576 2024-11-11T20:43:35,487 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:43:35,491 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:43:35,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34943 is added to blk_1073741825_1001 (size=7) 2024-11-11T20:43:35,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36315 is added to blk_1073741825_1001 (size=7) 2024-11-11T20:43:36,191 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1 with version=8 2024-11-11T20:43:36,191 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/hbase-staging 2024-11-11T20:43:36,285 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-11T20:43:36,575 INFO [Time-limited test {}] client.ConnectionUtils(128): master/51ca66f7ee3c:0 server-side Connection retries=45 2024-11-11T20:43:36,586 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T20:43:36,587 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T20:43:36,591 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T20:43:36,591 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T20:43:36,592 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T20:43:36,761 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-11T20:43:36,836 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-11T20:43:36,850 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-11T20:43:36,855 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T20:43:36,891 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 13433 (auto-detected) 2024-11-11T20:43:36,893 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-11T20:43:36,922 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39595 2024-11-11T20:43:36,955 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39595 connecting to ZooKeeper ensemble=127.0.0.1:58576 2024-11-11T20:43:37,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:395950x0, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T20:43:37,021 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39595-0x10030870ef70000 connected 2024-11-11T20:43:37,091 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:43:37,095 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:43:37,107 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39595-0x10030870ef70000, quorum=127.0.0.1:58576, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T20:43:37,113 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1, hbase.cluster.distributed=false 2024-11-11T20:43:37,145 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39595-0x10030870ef70000, quorum=127.0.0.1:58576, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T20:43:37,152 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39595 
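The entries above show the mini ZooKeeper cluster listening on client port 58576 and the master RPC server bound to 172.17.0.2:39595. Below is a minimal sketch (illustrative only; the quorum address and port are per-run values taken from this log) of pointing an HBase client Connection at that ensemble.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class MiniZkConnectionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Values as reported by MiniZooKeeperCluster in this particular run.
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.setInt("hbase.zookeeper.property.clientPort", 58576);

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Any admin call confirms the client can reach the mini cluster.
      System.out.println(admin.getClusterMetrics());
    }
  }
}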
2024-11-11T20:43:37,152 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39595 2024-11-11T20:43:37,153 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39595 2024-11-11T20:43:37,156 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39595 2024-11-11T20:43:37,156 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39595 2024-11-11T20:43:37,280 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/51ca66f7ee3c:0 server-side Connection retries=45 2024-11-11T20:43:37,281 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T20:43:37,282 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T20:43:37,282 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T20:43:37,282 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T20:43:37,282 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T20:43:37,285 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-11T20:43:37,287 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T20:43:37,288 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37423 2024-11-11T20:43:37,291 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37423 connecting to ZooKeeper ensemble=127.0.0.1:58576 2024-11-11T20:43:37,292 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:43:37,299 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:43:37,308 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:374230x0, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T20:43:37,309 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:374230x0, quorum=127.0.0.1:58576, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T20:43:37,309 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): 
regionserver:37423-0x10030870ef70001 connected 2024-11-11T20:43:37,314 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-11T20:43:37,326 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-11T20:43:37,328 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37423-0x10030870ef70001, quorum=127.0.0.1:58576, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-11T20:43:37,333 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37423-0x10030870ef70001, quorum=127.0.0.1:58576, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T20:43:37,334 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37423 2024-11-11T20:43:37,334 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37423 2024-11-11T20:43:37,334 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37423 2024-11-11T20:43:37,335 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37423 2024-11-11T20:43:37,335 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37423 2024-11-11T20:43:37,353 DEBUG [M:0;51ca66f7ee3c:39595 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;51ca66f7ee3c:39595 2024-11-11T20:43:37,354 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/51ca66f7ee3c,39595,1731357816356 2024-11-11T20:43:37,361 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39595-0x10030870ef70000, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T20:43:37,362 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37423-0x10030870ef70001, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T20:43:37,365 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39595-0x10030870ef70000, quorum=127.0.0.1:58576, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/51ca66f7ee3c,39595,1731357816356 2024-11-11T20:43:37,385 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37423-0x10030870ef70001, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-11T20:43:37,385 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39595-0x10030870ef70000, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:43:37,385 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37423-0x10030870ef70001, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:43:37,386 DEBUG 
[master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39595-0x10030870ef70000, quorum=127.0.0.1:58576, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-11T20:43:37,387 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/51ca66f7ee3c,39595,1731357816356 from backup master directory 2024-11-11T20:43:37,389 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39595-0x10030870ef70000, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/51ca66f7ee3c,39595,1731357816356 2024-11-11T20:43:37,389 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37423-0x10030870ef70001, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T20:43:37,389 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39595-0x10030870ef70000, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T20:43:37,390 WARN [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-11T20:43:37,390 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=51ca66f7ee3c,39595,1731357816356 2024-11-11T20:43:37,392 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-11T20:43:37,393 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-11T20:43:37,453 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/hbase.id] with ID: ee9776d7-c8b1-4d74-833b-6b57385f7473 2024-11-11T20:43:37,453 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/.tmp/hbase.id 2024-11-11T20:43:37,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34943 is added to blk_1073741826_1002 (size=42) 2024-11-11T20:43:37,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36315 is added to blk_1073741826_1002 (size=42) 2024-11-11T20:43:37,471 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/.tmp/hbase.id]:[hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/hbase.id] 2024-11-11T20:43:37,519 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:43:37,524 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching 
table descriptors from the filesystem. 2024-11-11T20:43:37,545 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 18ms. 2024-11-11T20:43:37,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37423-0x10030870ef70001, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:43:37,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39595-0x10030870ef70000, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:43:37,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34943 is added to blk_1073741827_1003 (size=196) 2024-11-11T20:43:37,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36315 is added to blk_1073741827_1003 (size=196) 2024-11-11T20:43:37,584 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T20:43:37,586 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-11T20:43:37,591 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T20:43:37,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34943 is added to blk_1073741828_1004 (size=1189) 2024-11-11T20:43:37,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36315 is added to blk_1073741828_1004 (size=1189) 2024-11-11T20:43:37,642 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', 
{TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/MasterData/data/master/store 2024-11-11T20:43:37,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36315 is added to blk_1073741829_1005 (size=34) 2024-11-11T20:43:37,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34943 is added to blk_1073741829_1005 (size=34) 2024-11-11T20:43:37,677 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-11T20:43:37,680 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T20:43:37,681 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T20:43:37,681 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T20:43:37,682 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T20:43:37,683 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T20:43:37,683 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T20:43:37,684 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
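The descriptor above is for the master local region ('master:store') with its info, proc, rs and state families. Below is a minimal sketch (illustrative only, not how MasterRegion itself builds it) of expressing the logged 'info' family attributes with the public ColumnFamilyDescriptorBuilder API.

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreInfoFamilySketch {
  public static void main(String[] args) {
    // VERSIONS => 3, BLOOMFILTER => ROWCOL, DATA_BLOCK_ENCODING => ROW_INDEX_V1,
    // IN_MEMORY => true, BLOCKSIZE => 8 KB, as printed in the descriptor above.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setBloomFilterType(BloomType.ROWCOL)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .build();
    System.out.println(info);
  }
}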
2024-11-11T20:43:37,685 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731357817681Disabling compacts and flushes for region at 1731357817681Disabling writes for close at 1731357817683 (+2 ms)Writing region close event to WAL at 1731357817683Closed at 1731357817683 2024-11-11T20:43:37,687 WARN [master/51ca66f7ee3c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/MasterData/data/master/store/.initializing 2024-11-11T20:43:37,687 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/MasterData/WALs/51ca66f7ee3c,39595,1731357816356 2024-11-11T20:43:37,709 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=51ca66f7ee3c%2C39595%2C1731357816356, suffix=, logDir=hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/MasterData/WALs/51ca66f7ee3c,39595,1731357816356, archiveDir=hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/MasterData/oldWALs, maxLogs=10 2024-11-11T20:43:37,721 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 51ca66f7ee3c%2C39595%2C1731357816356.1731357817714 2024-11-11T20:43:37,749 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/MasterData/WALs/51ca66f7ee3c,39595,1731357816356/51ca66f7ee3c%2C39595%2C1731357816356.1731357817714 2024-11-11T20:43:37,761 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35551:35551),(127.0.0.1/127.0.0.1:44869:44869)] 2024-11-11T20:43:37,762 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-11T20:43:37,763 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T20:43:37,767 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:43:37,768 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:43:37,813 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:43:37,840 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-11T20:43:37,845 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:43:37,850 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:43:37,851 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:43:37,854 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-11T20:43:37,854 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:43:37,856 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T20:43:37,856 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:43:37,860 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-11T20:43:37,860 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:43:37,861 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T20:43:37,862 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:43:37,865 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-11T20:43:37,866 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:43:37,867 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T20:43:37,868 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:43:37,872 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:43:37,873 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:43:37,878 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:43:37,878 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:43:37,882 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-11T20:43:37,887 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:43:37,893 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T20:43:37,896 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=882267, jitterRate=0.12186147272586823}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-11T20:43:37,903 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731357817786Initializing all the Stores at 1731357817788 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731357817788Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731357817789 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731357817789Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731357817789Cleaning up temporary data from old regions at 1731357817879 (+90 ms)Region opened successfully at 1731357817903 (+24 ms) 2024-11-11T20:43:37,905 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-11T20:43:37,949 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68bfe340, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=51ca66f7ee3c/172.17.0.2:0 2024-11-11T20:43:37,982 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-11T20:43:37,999 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-11T20:43:38,000 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-11T20:43:38,004 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-11T20:43:38,006 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-11T20:43:38,011 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-11T20:43:38,011 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-11T20:43:38,045 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-11T20:43:38,057 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39595-0x10030870ef70000, quorum=127.0.0.1:58576, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-11T20:43:38,060 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-11T20:43:38,063 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-11T20:43:38,065 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39595-0x10030870ef70000, quorum=127.0.0.1:58576, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-11T20:43:38,067 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-11T20:43:38,070 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-11T20:43:38,074 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39595-0x10030870ef70000, quorum=127.0.0.1:58576, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-11T20:43:38,076 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-11T20:43:38,078 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39595-0x10030870ef70000, quorum=127.0.0.1:58576, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-11T20:43:38,079 DEBUG 
[master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-11T20:43:38,102 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39595-0x10030870ef70000, quorum=127.0.0.1:58576, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-11T20:43:38,103 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-11T20:43:38,107 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39595-0x10030870ef70000, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T20:43:38,107 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37423-0x10030870ef70001, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T20:43:38,107 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39595-0x10030870ef70000, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:43:38,107 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37423-0x10030870ef70001, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:43:38,110 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=51ca66f7ee3c,39595,1731357816356, sessionid=0x10030870ef70000, setting cluster-up flag (Was=false) 2024-11-11T20:43:38,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39595-0x10030870ef70000, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:43:38,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37423-0x10030870ef70001, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:43:38,132 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-11T20:43:38,135 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=51ca66f7ee3c,39595,1731357816356 2024-11-11T20:43:38,140 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39595-0x10030870ef70000, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:43:38,141 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37423-0x10030870ef70001, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:43:38,149 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-11T20:43:38,152 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=51ca66f7ee3c,39595,1731357816356 2024-11-11T20:43:38,161 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-11T20:43:38,241 INFO [RS:0;51ca66f7ee3c:37423 {}] regionserver.HRegionServer(746): ClusterId : ee9776d7-c8b1-4d74-833b-6b57385f7473 2024-11-11T20:43:38,244 DEBUG [RS:0;51ca66f7ee3c:37423 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-11T20:43:38,251 DEBUG [RS:0;51ca66f7ee3c:37423 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-11T20:43:38,251 DEBUG [RS:0;51ca66f7ee3c:37423 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-11T20:43:38,263 DEBUG [RS:0;51ca66f7ee3c:37423 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-11T20:43:38,264 DEBUG [RS:0;51ca66f7ee3c:37423 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a7e6613, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=51ca66f7ee3c/172.17.0.2:0 2024-11-11T20:43:38,273 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-11T20:43:38,283 DEBUG [RS:0;51ca66f7ee3c:37423 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;51ca66f7ee3c:37423 2024-11-11T20:43:38,286 INFO [RS:0;51ca66f7ee3c:37423 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-11T20:43:38,286 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-11T20:43:38,286 INFO [RS:0;51ca66f7ee3c:37423 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-11T20:43:38,287 DEBUG [RS:0;51ca66f7ee3c:37423 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-11T20:43:38,291 INFO [RS:0;51ca66f7ee3c:37423 {}] regionserver.HRegionServer(2659): reportForDuty to master=51ca66f7ee3c,39595,1731357816356 with port=37423, startcode=1731357817233 2024-11-11T20:43:38,300 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
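The repeated CompactionConfiguration lines above (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.200000) reflect the stock compaction tuning knobs; a small sketch reading them from a client-side Configuration, with property names as commonly documented (treat the exact keys as an assumption for this HBase version):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigPeek {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Fallback defaults chosen to match the values logged above.
    int minFiles = conf.getInt("hbase.hstore.compaction.min", 3);
    int maxFiles = conf.getInt("hbase.hstore.compaction.max", 10);
    float ratio = conf.getFloat("hbase.hstore.compaction.ratio", 1.2f);
    System.out.printf("minFilesToCompact=%d maxFilesToCompact=%d ratio=%.1f%n",
        minFiles, maxFiles, ratio);
  }
}
```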
2024-11-11T20:43:38,309 DEBUG [RS:0;51ca66f7ee3c:37423 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-11T20:43:38,308 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 51ca66f7ee3c,39595,1731357816356 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-11T20:43:38,318 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/51ca66f7ee3c:0, corePoolSize=5, maxPoolSize=5 2024-11-11T20:43:38,318 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/51ca66f7ee3c:0, corePoolSize=5, maxPoolSize=5 2024-11-11T20:43:38,318 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/51ca66f7ee3c:0, corePoolSize=5, maxPoolSize=5 2024-11-11T20:43:38,319 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/51ca66f7ee3c:0, corePoolSize=5, maxPoolSize=5 2024-11-11T20:43:38,319 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/51ca66f7ee3c:0, corePoolSize=10, maxPoolSize=10 2024-11-11T20:43:38,319 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:43:38,319 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/51ca66f7ee3c:0, corePoolSize=2, maxPoolSize=2 2024-11-11T20:43:38,320 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:43:38,338 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T20:43:38,339 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-11T20:43:38,345 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731357848345 2024-11-11T20:43:38,347 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-11T20:43:38,349 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-11T20:43:38,353 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-11T20:43:38,352 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:43:38,353 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-11T20:43:38,354 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-11T20:43:38,354 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-11T20:43:38,354 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-11T20:43:38,358 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-11T20:43:38,370 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-11T20:43:38,371 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-11T20:43:38,372 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-11T20:43:38,385 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-11T20:43:38,386 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-11T20:43:38,398 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/51ca66f7ee3c:0:becomeActiveMaster-HFileCleaner.large.0-1731357818389,5,FailOnTimeoutGroup] 2024-11-11T20:43:38,400 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/51ca66f7ee3c:0:becomeActiveMaster-HFileCleaner.small.0-1731357818398,5,FailOnTimeoutGroup] 2024-11-11T20:43:38,400 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T20:43:38,400 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-11T20:43:38,402 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-11T20:43:38,402 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
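The log/HFile cleaner chains initialized above (TimeToLiveLogCleaner, ReplicationLogCleaner, HFileLinkCleaner, SnapshotHFileCleaner, TimeToLiveHFileCleaner) are largely driven by plugin lists the master reads from configuration; a hedged sketch of inspecting them, assuming the usual hbase.master.logcleaner.plugins / hbase.master.hfilecleaner.plugins property names:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CleanerPluginsPeek {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Comma-separated cleaner class names; the classes logged above largely come from these
    // lists (some cleaners may also be added programmatically by the master).
    System.out.println("log cleaners:   " + conf.get("hbase.master.logcleaner.plugins", "<default>"));
    System.out.println("hfile cleaners: " + conf.get("hbase.master.hfilecleaner.plugins", "<default>"));
  }
}
```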
2024-11-11T20:43:38,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34943 is added to blk_1073741831_1007 (size=1321) 2024-11-11T20:43:38,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36315 is added to blk_1073741831_1007 (size=1321) 2024-11-11T20:43:38,417 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43867, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-11T20:43:38,418 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-11T20:43:38,419 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1 2024-11-11T20:43:38,430 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39595 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 51ca66f7ee3c,37423,1731357817233 2024-11-11T20:43:38,433 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39595 {}] master.ServerManager(517): Registering regionserver=51ca66f7ee3c,37423,1731357817233 2024-11-11T20:43:38,449 DEBUG [RS:0;51ca66f7ee3c:37423 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1 2024-11-11T20:43:38,450 DEBUG [RS:0;51ca66f7ee3c:37423 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43051 2024-11-11T20:43:38,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36315 is added to blk_1073741832_1008 (size=32) 
2024-11-11T20:43:38,450 DEBUG [RS:0;51ca66f7ee3c:37423 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-11T20:43:38,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34943 is added to blk_1073741832_1008 (size=32) 2024-11-11T20:43:38,454 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T20:43:38,455 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39595-0x10030870ef70000, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T20:43:38,457 DEBUG [RS:0;51ca66f7ee3c:37423 {}] zookeeper.ZKUtil(111): regionserver:37423-0x10030870ef70001, quorum=127.0.0.1:58576, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/51ca66f7ee3c,37423,1731357817233 2024-11-11T20:43:38,458 WARN [RS:0;51ca66f7ee3c:37423 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-11T20:43:38,458 INFO [RS:0;51ca66f7ee3c:37423 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T20:43:38,458 DEBUG [RS:0;51ca66f7ee3c:37423 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/WALs/51ca66f7ee3c,37423,1731357817233 2024-11-11T20:43:38,459 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T20:43:38,461 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [51ca66f7ee3c,37423,1731357817233] 2024-11-11T20:43:38,463 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T20:43:38,463 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:43:38,464 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:43:38,465 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T20:43:38,469 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T20:43:38,469 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:43:38,470 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:43:38,471 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T20:43:38,479 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T20:43:38,479 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:43:38,480 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:43:38,481 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T20:43:38,484 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 
604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T20:43:38,485 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:43:38,486 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:43:38,487 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T20:43:38,494 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/hbase/meta/1588230740 2024-11-11T20:43:38,495 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/hbase/meta/1588230740 2024-11-11T20:43:38,499 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T20:43:38,499 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T20:43:38,500 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
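The FlushLargeStoresPolicy message above names its tuning key directly; when hbase.hregion.percolumnfamilyflush.size.lower.bound is unset, the policy falls back to the region's memstore flush heap size divided by the number of families (16 MB here for the four families of hbase:meta). A sketch of setting the bound explicitly, with the value chosen only for illustration:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class PerFamilyFlushBound {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Key copied verbatim from the log line above; 16 MB mirrors the computed fallback.
    conf.setLong("hbase.hregion.percolumnfamilyflush.size.lower.bound", 16L * 1024 * 1024);
    System.out.println(conf.getLong("hbase.hregion.percolumnfamilyflush.size.lower.bound", -1L));
  }
}
```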
2024-11-11T20:43:38,501 INFO [RS:0;51ca66f7ee3c:37423 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-11T20:43:38,503 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T20:43:38,511 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T20:43:38,512 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=769863, jitterRate=-0.021069839596748352}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T20:43:38,517 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731357818455Initializing all the Stores at 1731357818458 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731357818459 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731357818459Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731357818459Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731357818459Cleaning up temporary data from old regions at 1731357818499 (+40 ms)Region opened successfully at 1731357818517 (+18 ms) 2024-11-11T20:43:38,517 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T20:43:38,517 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T20:43:38,518 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T20:43:38,518 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T20:43:38,518 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T20:43:38,522 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T20:43:38,522 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal 
for 1588230740: Waiting for close lock at 1731357818517Disabling compacts and flushes for region at 1731357818517Disabling writes for close at 1731357818518 (+1 ms)Writing region close event to WAL at 1731357818522 (+4 ms)Closed at 1731357818522 2024-11-11T20:43:38,523 INFO [RS:0;51ca66f7ee3c:37423 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-11T20:43:38,525 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T20:43:38,525 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-11T20:43:38,528 INFO [RS:0;51ca66f7ee3c:37423 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T20:43:38,528 INFO [RS:0;51ca66f7ee3c:37423 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T20:43:38,530 INFO [RS:0;51ca66f7ee3c:37423 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-11T20:43:38,531 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-11T20:43:38,536 INFO [RS:0;51ca66f7ee3c:37423 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-11T20:43:38,538 INFO [RS:0;51ca66f7ee3c:37423 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
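The MemStoreFlusher line above (globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M) is consistent with the usual sizing of roughly 40% of the region-server heap for the global memstore and a 0.95 low-water fraction, since 880 MB × 0.95 ≈ 836 MB; a sketch of the two fractions involved, with property names given as an assumption for this version:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreLimits {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    float globalFraction = conf.getFloat("hbase.regionserver.global.memstore.size", 0.4f);
    float lowerFraction = conf.getFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
    // A heap of roughly 2.2 GB * 0.4 ≈ 880 MB, and 880 MB * 0.95 ≈ 836 MB,
    // matching globalMemStoreLimit and globalMemStoreLimitLowMark above.
    System.out.println("global=" + globalFraction + " lowerLimit=" + lowerFraction);
  }
}
```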
2024-11-11T20:43:38,538 DEBUG [RS:0;51ca66f7ee3c:37423 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:43:38,538 DEBUG [RS:0;51ca66f7ee3c:37423 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:43:38,538 DEBUG [RS:0;51ca66f7ee3c:37423 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:43:38,538 DEBUG [RS:0;51ca66f7ee3c:37423 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:43:38,539 DEBUG [RS:0;51ca66f7ee3c:37423 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:43:38,539 DEBUG [RS:0;51ca66f7ee3c:37423 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/51ca66f7ee3c:0, corePoolSize=2, maxPoolSize=2 2024-11-11T20:43:38,539 DEBUG [RS:0;51ca66f7ee3c:37423 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:43:38,539 DEBUG [RS:0;51ca66f7ee3c:37423 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:43:38,539 DEBUG [RS:0;51ca66f7ee3c:37423 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:43:38,540 DEBUG [RS:0;51ca66f7ee3c:37423 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:43:38,540 DEBUG [RS:0;51ca66f7ee3c:37423 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:43:38,540 DEBUG [RS:0;51ca66f7ee3c:37423 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:43:38,540 DEBUG [RS:0;51ca66f7ee3c:37423 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/51ca66f7ee3c:0, corePoolSize=3, maxPoolSize=3 2024-11-11T20:43:38,540 DEBUG [RS:0;51ca66f7ee3c:37423 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/51ca66f7ee3c:0, corePoolSize=3, maxPoolSize=3 2024-11-11T20:43:38,541 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T20:43:38,543 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-11T20:43:38,545 INFO [RS:0;51ca66f7ee3c:37423 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is 
enabled. 2024-11-11T20:43:38,546 INFO [RS:0;51ca66f7ee3c:37423 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T20:43:38,546 INFO [RS:0;51ca66f7ee3c:37423 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T20:43:38,546 INFO [RS:0;51ca66f7ee3c:37423 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-11T20:43:38,546 INFO [RS:0;51ca66f7ee3c:37423 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T20:43:38,546 INFO [RS:0;51ca66f7ee3c:37423 {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,37423,1731357817233-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T20:43:38,572 INFO [RS:0;51ca66f7ee3c:37423 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T20:43:38,574 INFO [RS:0;51ca66f7ee3c:37423 {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,37423,1731357817233-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T20:43:38,575 INFO [RS:0;51ca66f7ee3c:37423 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T20:43:38,575 INFO [RS:0;51ca66f7ee3c:37423 {}] regionserver.Replication(171): 51ca66f7ee3c,37423,1731357817233 started 2024-11-11T20:43:38,601 INFO [RS:0;51ca66f7ee3c:37423 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T20:43:38,601 INFO [RS:0;51ca66f7ee3c:37423 {}] regionserver.HRegionServer(1482): Serving as 51ca66f7ee3c,37423,1731357817233, RpcServer on 51ca66f7ee3c/172.17.0.2:37423, sessionid=0x10030870ef70001 2024-11-11T20:43:38,602 DEBUG [RS:0;51ca66f7ee3c:37423 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T20:43:38,602 DEBUG [RS:0;51ca66f7ee3c:37423 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 51ca66f7ee3c,37423,1731357817233 2024-11-11T20:43:38,603 DEBUG [RS:0;51ca66f7ee3c:37423 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '51ca66f7ee3c,37423,1731357817233' 2024-11-11T20:43:38,603 DEBUG [RS:0;51ca66f7ee3c:37423 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T20:43:38,604 DEBUG [RS:0;51ca66f7ee3c:37423 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T20:43:38,605 DEBUG [RS:0;51ca66f7ee3c:37423 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T20:43:38,605 DEBUG [RS:0;51ca66f7ee3c:37423 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T20:43:38,605 DEBUG [RS:0;51ca66f7ee3c:37423 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 51ca66f7ee3c,37423,1731357817233 2024-11-11T20:43:38,605 DEBUG [RS:0;51ca66f7ee3c:37423 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '51ca66f7ee3c,37423,1731357817233' 2024-11-11T20:43:38,605 DEBUG [RS:0;51ca66f7ee3c:37423 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 
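At this point the region server is registered and serving (quorum=127.0.0.1:58576, baseZNode=/hbase, as repeated in the watcher lines). A hedged sketch of how a client in the same test environment could connect and list the live servers; the quorum, client port, and znode parent are taken from the log, everything else is standard client API:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClusterPeek {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");        // from quorum=127.0.0.1:58576 above
    conf.set("hbase.zookeeper.property.clientPort", "58576");
    conf.set("zookeeper.znode.parent", "/hbase");           // baseZNode in the watcher lines
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Expect the single region server registered above: 51ca66f7ee3c,37423,1731357817233
      System.out.println(admin.getClusterMetrics().getServersName());
    }
  }
}
```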
2024-11-11T20:43:38,607 DEBUG [RS:0;51ca66f7ee3c:37423 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T20:43:38,608 DEBUG [RS:0;51ca66f7ee3c:37423 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T20:43:38,608 INFO [RS:0;51ca66f7ee3c:37423 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-11T20:43:38,609 INFO [RS:0;51ca66f7ee3c:37423 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-11T20:43:38,694 WARN [51ca66f7ee3c:39595 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-11T20:43:38,717 INFO [RS:0;51ca66f7ee3c:37423 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=51ca66f7ee3c%2C37423%2C1731357817233, suffix=, logDir=hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/WALs/51ca66f7ee3c,37423,1731357817233, archiveDir=hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/oldWALs, maxLogs=32 2024-11-11T20:43:38,720 INFO [RS:0;51ca66f7ee3c:37423 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 51ca66f7ee3c%2C37423%2C1731357817233.1731357818720 2024-11-11T20:43:38,731 INFO [RS:0;51ca66f7ee3c:37423 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/WALs/51ca66f7ee3c,37423,1731357817233/51ca66f7ee3c%2C37423%2C1731357817233.1731357818720 2024-11-11T20:43:38,732 DEBUG [RS:0;51ca66f7ee3c:37423 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44869:44869),(127.0.0.1/127.0.0.1:35551:35551)] 2024-11-11T20:43:38,947 DEBUG [51ca66f7ee3c:39595 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-11T20:43:38,959 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=51ca66f7ee3c,37423,1731357817233 2024-11-11T20:43:38,966 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 51ca66f7ee3c,37423,1731357817233, state=OPENING 2024-11-11T20:43:38,970 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-11T20:43:38,971 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37423-0x10030870ef70001, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:43:38,971 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39595-0x10030870ef70000, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:43:38,972 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T20:43:38,972 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T20:43:38,974 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, 
ASSIGN 2024-11-11T20:43:38,976 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=51ca66f7ee3c,37423,1731357817233}] 2024-11-11T20:43:39,150 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-11T20:43:39,154 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42129, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-11T20:43:39,165 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-11T20:43:39,166 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T20:43:39,169 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=51ca66f7ee3c%2C37423%2C1731357817233.meta, suffix=.meta, logDir=hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/WALs/51ca66f7ee3c,37423,1731357817233, archiveDir=hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/oldWALs, maxLogs=32 2024-11-11T20:43:39,172 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 51ca66f7ee3c%2C37423%2C1731357817233.meta.1731357819171.meta 2024-11-11T20:43:39,182 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/WALs/51ca66f7ee3c,37423,1731357817233/51ca66f7ee3c%2C37423%2C1731357817233.meta.1731357819171.meta 2024-11-11T20:43:39,185 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44869:44869),(127.0.0.1/127.0.0.1:35551:35551)] 2024-11-11T20:43:39,192 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-11T20:43:39,194 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-11T20:43:39,196 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-11T20:43:39,201 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-11T20:43:39,208 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-11T20:43:39,209 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T20:43:39,210 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-11T20:43:39,210 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-11T20:43:39,216 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T20:43:39,218 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T20:43:39,218 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:43:39,219 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:43:39,219 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T20:43:39,221 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T20:43:39,221 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:43:39,222 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:43:39,222 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T20:43:39,224 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T20:43:39,224 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:43:39,225 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:43:39,225 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T20:43:39,226 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T20:43:39,226 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:43:39,227 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-11T20:43:39,227 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T20:43:39,229 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/hbase/meta/1588230740 2024-11-11T20:43:39,233 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/hbase/meta/1588230740 2024-11-11T20:43:39,236 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T20:43:39,236 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T20:43:39,237 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-11T20:43:39,240 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T20:43:39,242 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=714731, jitterRate=-0.09117288887500763}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T20:43:39,243 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-11T20:43:39,244 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731357819211Writing region info on filesystem at 1731357819211Initializing all the Stores at 1731357819213 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731357819213Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731357819216 (+3 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731357819216Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731357819216Cleaning up temporary data from old regions at 1731357819237 (+21 ms)Running coprocessor post-open hooks at 1731357819243 (+6 ms)Region opened successfully at 1731357819244 (+1 ms) 2024-11-11T20:43:39,251 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731357819143 2024-11-11T20:43:39,264 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-11T20:43:39,264 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-11T20:43:39,266 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=51ca66f7ee3c,37423,1731357817233 2024-11-11T20:43:39,268 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 51ca66f7ee3c,37423,1731357817233, state=OPEN 2024-11-11T20:43:39,271 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39595-0x10030870ef70000, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T20:43:39,271 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37423-0x10030870ef70001, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T20:43:39,271 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T20:43:39,271 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T20:43:39,272 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=51ca66f7ee3c,37423,1731357817233 2024-11-11T20:43:39,279 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-11T20:43:39,280 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=51ca66f7ee3c,37423,1731357817233 in 297 msec 2024-11-11T20:43:39,287 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-11T20:43:39,287 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 750 msec 2024-11-11T20:43:39,288 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T20:43:39,288 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-11T20:43:39,307 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T20:43:39,308 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=51ca66f7ee3c,37423,1731357817233, seqNum=-1] 2024-11-11T20:43:39,328 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T20:43:39,331 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45601, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T20:43:39,354 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1480 sec 2024-11-11T20:43:39,354 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731357819354, completionTime=-1 2024-11-11T20:43:39,357 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-11T20:43:39,357 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-11T20:43:39,384 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-11T20:43:39,385 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731357879384 2024-11-11T20:43:39,385 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731357939385 2024-11-11T20:43:39,385 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 27 msec 2024-11-11T20:43:39,387 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,39595,1731357816356-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T20:43:39,388 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,39595,1731357816356-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T20:43:39,388 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,39595,1731357816356-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T20:43:39,390 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-51ca66f7ee3c:39595, period=300000, unit=MILLISECONDS is enabled. 
2024-11-11T20:43:39,390 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-11T20:43:39,391 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-11T20:43:39,398 DEBUG [master/51ca66f7ee3c:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-11T20:43:39,422 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.032sec 2024-11-11T20:43:39,424 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-11T20:43:39,425 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-11T20:43:39,426 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-11T20:43:39,426 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-11T20:43:39,426 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-11T20:43:39,427 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,39595,1731357816356-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T20:43:39,428 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,39595,1731357816356-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-11T20:43:39,435 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-11T20:43:39,436 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-11T20:43:39,437 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,39595,1731357816356-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-11T20:43:39,455 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@b598c24, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T20:43:39,457 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-11T20:43:39,458 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-11T20:43:39,462 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 51ca66f7ee3c,39595,-1 for getting cluster id 2024-11-11T20:43:39,466 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-11T20:43:39,476 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ee9776d7-c8b1-4d74-833b-6b57385f7473' 2024-11-11T20:43:39,479 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-11T20:43:39,479 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ee9776d7-c8b1-4d74-833b-6b57385f7473" 2024-11-11T20:43:39,480 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@466f6cf6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T20:43:39,480 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [51ca66f7ee3c,39595,-1] 2024-11-11T20:43:39,483 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-11T20:43:39,485 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T20:43:39,487 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53996, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-11T20:43:39,490 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36e15a4d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T20:43:39,491 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T20:43:39,500 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=51ca66f7ee3c,37423,1731357817233, seqNum=-1] 2024-11-11T20:43:39,500 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T20:43:39,503 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47616, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T20:43:39,525 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=51ca66f7ee3c,39595,1731357816356 2024-11-11T20:43:39,526 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:43:39,533 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-11T20:43:39,539 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-11T20:43:39,546 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 51ca66f7ee3c,39595,1731357816356 2024-11-11T20:43:39,549 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@43cb8576 2024-11-11T20:43:39,550 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-11T20:43:39,553 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54006, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-11T20:43:39,555 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39595 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-11T20:43:39,555 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39595 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-11T20:43:39,559 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39595 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T20:43:39,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39595 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-11T20:43:39,572 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-11T20:43:39,573 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39595 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-11T20:43:39,574 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:43:39,576 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-11T20:43:39,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39595 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T20:43:39,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36315 is added to blk_1073741835_1011 (size=389) 2024-11-11T20:43:39,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34943 is added to blk_1073741835_1011 (size=389) 2024-11-11T20:43:39,616 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => bb3997d6cf7f0cd0aa445bde94e43a8a, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731357819555.bb3997d6cf7f0cd0aa445bde94e43a8a.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1 2024-11-11T20:43:39,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36315 is added to blk_1073741836_1012 (size=72) 2024-11-11T20:43:39,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34943 is added to blk_1073741836_1012 (size=72) 2024-11-11T20:43:39,633 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731357819555.bb3997d6cf7f0cd0aa445bde94e43a8a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T20:43:39,633 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing bb3997d6cf7f0cd0aa445bde94e43a8a, disabling compactions & flushes 2024-11-11T20:43:39,633 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731357819555.bb3997d6cf7f0cd0aa445bde94e43a8a. 2024-11-11T20:43:39,633 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731357819555.bb3997d6cf7f0cd0aa445bde94e43a8a. 2024-11-11T20:43:39,633 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731357819555.bb3997d6cf7f0cd0aa445bde94e43a8a. after waiting 0 ms 2024-11-11T20:43:39,633 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731357819555.bb3997d6cf7f0cd0aa445bde94e43a8a. 2024-11-11T20:43:39,633 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731357819555.bb3997d6cf7f0cd0aa445bde94e43a8a. 2024-11-11T20:43:39,633 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for bb3997d6cf7f0cd0aa445bde94e43a8a: Waiting for close lock at 1731357819633Disabling compacts and flushes for region at 1731357819633Disabling writes for close at 1731357819633Writing region close event to WAL at 1731357819633Closed at 1731357819633 2024-11-11T20:43:39,636 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-11T20:43:39,642 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1731357819555.bb3997d6cf7f0cd0aa445bde94e43a8a.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1731357819636"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731357819636"}]},"ts":"1731357819636"} 2024-11-11T20:43:39,647 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-11T20:43:39,649 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-11T20:43:39,651 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731357819649"}]},"ts":"1731357819649"} 2024-11-11T20:43:39,656 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-11T20:43:39,658 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=bb3997d6cf7f0cd0aa445bde94e43a8a, ASSIGN}] 2024-11-11T20:43:39,661 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=bb3997d6cf7f0cd0aa445bde94e43a8a, ASSIGN 2024-11-11T20:43:39,663 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=bb3997d6cf7f0cd0aa445bde94e43a8a, ASSIGN; state=OFFLINE, location=51ca66f7ee3c,37423,1731357817233; forceNewPlan=false, retain=false 2024-11-11T20:43:39,815 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=bb3997d6cf7f0cd0aa445bde94e43a8a, regionState=OPENING, regionLocation=51ca66f7ee3c,37423,1731357817233 2024-11-11T20:43:39,820 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=bb3997d6cf7f0cd0aa445bde94e43a8a, ASSIGN because future has completed 2024-11-11T20:43:39,822 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure bb3997d6cf7f0cd0aa445bde94e43a8a, server=51ca66f7ee3c,37423,1731357817233}] 2024-11-11T20:43:39,983 INFO [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1731357819555.bb3997d6cf7f0cd0aa445bde94e43a8a. 
2024-11-11T20:43:39,983 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => bb3997d6cf7f0cd0aa445bde94e43a8a, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731357819555.bb3997d6cf7f0cd0aa445bde94e43a8a.', STARTKEY => '', ENDKEY => ''} 2024-11-11T20:43:39,984 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling bb3997d6cf7f0cd0aa445bde94e43a8a 2024-11-11T20:43:39,984 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731357819555.bb3997d6cf7f0cd0aa445bde94e43a8a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T20:43:39,984 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for bb3997d6cf7f0cd0aa445bde94e43a8a 2024-11-11T20:43:39,984 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for bb3997d6cf7f0cd0aa445bde94e43a8a 2024-11-11T20:43:39,987 INFO [StoreOpener-bb3997d6cf7f0cd0aa445bde94e43a8a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region bb3997d6cf7f0cd0aa445bde94e43a8a 2024-11-11T20:43:39,990 INFO [StoreOpener-bb3997d6cf7f0cd0aa445bde94e43a8a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bb3997d6cf7f0cd0aa445bde94e43a8a columnFamilyName info 2024-11-11T20:43:39,991 DEBUG [StoreOpener-bb3997d6cf7f0cd0aa445bde94e43a8a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:43:40,008 INFO [StoreOpener-bb3997d6cf7f0cd0aa445bde94e43a8a-1 {}] regionserver.HStore(327): Store=bb3997d6cf7f0cd0aa445bde94e43a8a/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T20:43:40,009 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for bb3997d6cf7f0cd0aa445bde94e43a8a 2024-11-11T20:43:40,010 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/default/TestLogRolling-testSlowSyncLogRolling/bb3997d6cf7f0cd0aa445bde94e43a8a 2024-11-11T20:43:40,011 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/default/TestLogRolling-testSlowSyncLogRolling/bb3997d6cf7f0cd0aa445bde94e43a8a 2024-11-11T20:43:40,012 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for bb3997d6cf7f0cd0aa445bde94e43a8a 2024-11-11T20:43:40,012 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for bb3997d6cf7f0cd0aa445bde94e43a8a 2024-11-11T20:43:40,016 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for bb3997d6cf7f0cd0aa445bde94e43a8a 2024-11-11T20:43:40,022 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/default/TestLogRolling-testSlowSyncLogRolling/bb3997d6cf7f0cd0aa445bde94e43a8a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T20:43:40,023 INFO [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened bb3997d6cf7f0cd0aa445bde94e43a8a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=712084, jitterRate=-0.09453913569450378}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-11T20:43:40,024 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for bb3997d6cf7f0cd0aa445bde94e43a8a 2024-11-11T20:43:40,025 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for bb3997d6cf7f0cd0aa445bde94e43a8a: Running coprocessor pre-open hook at 1731357819985Writing region info on filesystem at 1731357819985Initializing all the Stores at 1731357819986 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731357819986Cleaning up temporary data from old regions at 1731357820012 (+26 ms)Running coprocessor post-open hooks at 1731357820024 (+12 ms)Region opened successfully at 1731357820024 2024-11-11T20:43:40,027 INFO [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1731357819555.bb3997d6cf7f0cd0aa445bde94e43a8a., pid=6, masterSystemTime=1731357819976 2024-11-11T20:43:40,033 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1731357819555.bb3997d6cf7f0cd0aa445bde94e43a8a. 2024-11-11T20:43:40,033 INFO [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1731357819555.bb3997d6cf7f0cd0aa445bde94e43a8a. 2024-11-11T20:43:40,033 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=bb3997d6cf7f0cd0aa445bde94e43a8a, regionState=OPEN, openSeqNum=2, regionLocation=51ca66f7ee3c,37423,1731357817233 2024-11-11T20:43:40,038 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure bb3997d6cf7f0cd0aa445bde94e43a8a, server=51ca66f7ee3c,37423,1731357817233 because future has completed 2024-11-11T20:43:40,052 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-11T20:43:40,054 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure bb3997d6cf7f0cd0aa445bde94e43a8a, server=51ca66f7ee3c,37423,1731357817233 in 226 msec 2024-11-11T20:43:40,058 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-11T20:43:40,058 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=bb3997d6cf7f0cd0aa445bde94e43a8a, ASSIGN in 394 msec 2024-11-11T20:43:40,060 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-11T20:43:40,061 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731357820060"}]},"ts":"1731357820060"} 2024-11-11T20:43:40,065 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-11T20:43:40,067 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-11T20:43:40,072 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 506 msec 2024-11-11T20:43:44,714 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-11T20:43:44,768 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-11T20:43:44,769 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-11T20:43:46,831 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-11T20:43:46,831 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-11T20:43:46,832 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-11T20:43:46,833 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-11T20:43:46,834 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T20:43:46,834 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-11T20:43:46,834 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-11T20:43:46,834 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-11T20:43:49,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39595 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T20:43:49,610 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-11T20:43:49,613 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-11T20:43:49,621 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-11T20:43:49,622 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1731357819555.bb3997d6cf7f0cd0aa445bde94e43a8a. 
2024-11-11T20:43:49,623 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 51ca66f7ee3c%2C37423%2C1731357817233.1731357829623 2024-11-11T20:43:49,637 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:43:49,637 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:43:49,637 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:43:49,637 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:43:49,637 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:43:49,638 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/WALs/51ca66f7ee3c,37423,1731357817233/51ca66f7ee3c%2C37423%2C1731357817233.1731357818720 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/WALs/51ca66f7ee3c,37423,1731357817233/51ca66f7ee3c%2C37423%2C1731357817233.1731357829623 2024-11-11T20:43:49,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34943 is added to blk_1073741833_1009 (size=451) 2024-11-11T20:43:49,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36315 is added to blk_1073741833_1009 (size=451) 2024-11-11T20:43:49,643 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44869:44869),(127.0.0.1/127.0.0.1:35551:35551)] 2024-11-11T20:43:49,643 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/WALs/51ca66f7ee3c,37423,1731357817233/51ca66f7ee3c%2C37423%2C1731357817233.1731357818720 to hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/oldWALs/51ca66f7ee3c%2C37423%2C1731357817233.1731357818720 2024-11-11T20:43:49,652 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1731357819555.bb3997d6cf7f0cd0aa445bde94e43a8a., hostname=51ca66f7ee3c,37423,1731357817233, seqNum=2] 2024-11-11T20:44:01,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37423 {}] regionserver.HRegion(8855): Flush requested on bb3997d6cf7f0cd0aa445bde94e43a8a 2024-11-11T20:44:01,699 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bb3997d6cf7f0cd0aa445bde94e43a8a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-11T20:44:01,760 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/default/TestLogRolling-testSlowSyncLogRolling/bb3997d6cf7f0cd0aa445bde94e43a8a/.tmp/info/2c3eed0610a748c2995bbb2e17029a57 is 1080, key is row0001/info:/1731357829656/Put/seqid=0 2024-11-11T20:44:01,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36315 is added to blk_1073741838_1014 (size=12509) 2024-11-11T20:44:01,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34943 is added to blk_1073741838_1014 (size=12509) 2024-11-11T20:44:01,772 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/default/TestLogRolling-testSlowSyncLogRolling/bb3997d6cf7f0cd0aa445bde94e43a8a/.tmp/info/2c3eed0610a748c2995bbb2e17029a57 2024-11-11T20:44:01,815 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/default/TestLogRolling-testSlowSyncLogRolling/bb3997d6cf7f0cd0aa445bde94e43a8a/.tmp/info/2c3eed0610a748c2995bbb2e17029a57 as hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/default/TestLogRolling-testSlowSyncLogRolling/bb3997d6cf7f0cd0aa445bde94e43a8a/info/2c3eed0610a748c2995bbb2e17029a57 2024-11-11T20:44:01,825 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/default/TestLogRolling-testSlowSyncLogRolling/bb3997d6cf7f0cd0aa445bde94e43a8a/info/2c3eed0610a748c2995bbb2e17029a57, entries=7, sequenceid=11, filesize=12.2 K 2024-11-11T20:44:01,831 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for bb3997d6cf7f0cd0aa445bde94e43a8a in 132ms, sequenceid=11, compaction requested=false 2024-11-11T20:44:01,832 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bb3997d6cf7f0cd0aa445bde94e43a8a: 2024-11-11T20:44:05,361 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-11T20:44:09,722 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 51ca66f7ee3c%2C37423%2C1731357817233.1731357849722 2024-11-11T20:44:09,935 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 210 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36315,DS-e0d6e228-645c-44f5-b9f9-97024982914f,DISK], DatanodeInfoWithStorage[127.0.0.1:34943,DS-d17006ea-ccdb-403c-ba2a-171fcf622c2a,DISK]] 2024-11-11T20:44:09,935 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:44:09,935 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:44:09,935 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:44:09,936 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:44:09,936 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:44:09,936 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/WALs/51ca66f7ee3c,37423,1731357817233/51ca66f7ee3c%2C37423%2C1731357817233.1731357829623 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/WALs/51ca66f7ee3c,37423,1731357817233/51ca66f7ee3c%2C37423%2C1731357817233.1731357849722 2024-11-11T20:44:09,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34943 is added to blk_1073741837_1013 (size=12399) 2024-11-11T20:44:09,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36315 is added to blk_1073741837_1013 (size=12399) 2024-11-11T20:44:09,945 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44869:44869),(127.0.0.1/127.0.0.1:35551:35551)] 
2024-11-11T20:44:10,149 INFO [FSHLog-0-hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1-prefix:51ca66f7ee3c,37423,1731357817233 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36315,DS-e0d6e228-645c-44f5-b9f9-97024982914f,DISK], DatanodeInfoWithStorage[127.0.0.1:34943,DS-d17006ea-ccdb-403c-ba2a-171fcf622c2a,DISK]] 2024-11-11T20:44:12,354 INFO [FSHLog-0-hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1-prefix:51ca66f7ee3c,37423,1731357817233 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36315,DS-e0d6e228-645c-44f5-b9f9-97024982914f,DISK], DatanodeInfoWithStorage[127.0.0.1:34943,DS-d17006ea-ccdb-403c-ba2a-171fcf622c2a,DISK]] 2024-11-11T20:44:14,559 INFO [FSHLog-0-hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1-prefix:51ca66f7ee3c,37423,1731357817233 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36315,DS-e0d6e228-645c-44f5-b9f9-97024982914f,DISK], DatanodeInfoWithStorage[127.0.0.1:34943,DS-d17006ea-ccdb-403c-ba2a-171fcf622c2a,DISK]] 2024-11-11T20:44:16,763 INFO [FSHLog-0-hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1-prefix:51ca66f7ee3c,37423,1731357817233 {}] wal.AbstractFSWAL(1368): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36315,DS-e0d6e228-645c-44f5-b9f9-97024982914f,DISK], DatanodeInfoWithStorage[127.0.0.1:34943,DS-d17006ea-ccdb-403c-ba2a-171fcf622c2a,DISK]] 2024-11-11T20:44:16,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37423 {}] regionserver.HRegion(8855): Flush requested on bb3997d6cf7f0cd0aa445bde94e43a8a 2024-11-11T20:44:16,764 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bb3997d6cf7f0cd0aa445bde94e43a8a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-11T20:44:16,965 INFO [FSHLog-0-hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1-prefix:51ca66f7ee3c,37423,1731357817233 {}] wal.AbstractFSWAL(1368): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36315,DS-e0d6e228-645c-44f5-b9f9-97024982914f,DISK], DatanodeInfoWithStorage[127.0.0.1:34943,DS-d17006ea-ccdb-403c-ba2a-171fcf622c2a,DISK]] 2024-11-11T20:44:16,971 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/default/TestLogRolling-testSlowSyncLogRolling/bb3997d6cf7f0cd0aa445bde94e43a8a/.tmp/info/7b0b66134fa04992a176055fdf0ad76f is 1080, key is row0008/info:/1731357843699/Put/seqid=0 2024-11-11T20:44:16,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34943 is added to blk_1073741840_1016 (size=12509) 2024-11-11T20:44:16,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36315 is added to blk_1073741840_1016 (size=12509) 2024-11-11T20:44:16,982 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/default/TestLogRolling-testSlowSyncLogRolling/bb3997d6cf7f0cd0aa445bde94e43a8a/.tmp/info/7b0b66134fa04992a176055fdf0ad76f 2024-11-11T20:44:16,993 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/default/TestLogRolling-testSlowSyncLogRolling/bb3997d6cf7f0cd0aa445bde94e43a8a/.tmp/info/7b0b66134fa04992a176055fdf0ad76f as hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/default/TestLogRolling-testSlowSyncLogRolling/bb3997d6cf7f0cd0aa445bde94e43a8a/info/7b0b66134fa04992a176055fdf0ad76f 2024-11-11T20:44:17,004 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/default/TestLogRolling-testSlowSyncLogRolling/bb3997d6cf7f0cd0aa445bde94e43a8a/info/7b0b66134fa04992a176055fdf0ad76f, entries=7, sequenceid=21, filesize=12.2 K 2024-11-11T20:44:17,206 INFO [FSHLog-0-hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1-prefix:51ca66f7ee3c,37423,1731357817233 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36315,DS-e0d6e228-645c-44f5-b9f9-97024982914f,DISK], DatanodeInfoWithStorage[127.0.0.1:34943,DS-d17006ea-ccdb-403c-ba2a-171fcf622c2a,DISK]] 2024-11-11T20:44:17,206 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for bb3997d6cf7f0cd0aa445bde94e43a8a in 443ms, sequenceid=21, compaction requested=false 2024-11-11T20:44:17,206 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bb3997d6cf7f0cd0aa445bde94e43a8a: 2024-11-11T20:44:17,206 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-11T20:44:17,206 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T20:44:17,207 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/default/TestLogRolling-testSlowSyncLogRolling/bb3997d6cf7f0cd0aa445bde94e43a8a/info/2c3eed0610a748c2995bbb2e17029a57 because midkey is the same as first or last row 2024-11-11T20:44:18,970 INFO [FSHLog-0-hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1-prefix:51ca66f7ee3c,37423,1731357817233 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36315,DS-e0d6e228-645c-44f5-b9f9-97024982914f,DISK], DatanodeInfoWithStorage[127.0.0.1:34943,DS-d17006ea-ccdb-403c-ba2a-171fcf622c2a,DISK]] 2024-11-11T20:44:19,581 INFO [master/51ca66f7ee3c:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-11T20:44:19,582 INFO [master/51ca66f7ee3c:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-11-11T20:44:21,176 INFO [FSHLog-0-hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1-prefix:51ca66f7ee3c,37423,1731357817233 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36315,DS-e0d6e228-645c-44f5-b9f9-97024982914f,DISK], DatanodeInfoWithStorage[127.0.0.1:34943,DS-d17006ea-ccdb-403c-ba2a-171fcf622c2a,DISK]] 2024-11-11T20:44:21,181 WARN [FSHLog-0-hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1-prefix:51ca66f7ee3c,37423,1731357817233 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36315,DS-e0d6e228-645c-44f5-b9f9-97024982914f,DISK], DatanodeInfoWithStorage[127.0.0.1:34943,DS-d17006ea-ccdb-403c-ba2a-171fcf622c2a,DISK]] 2024-11-11T20:44:21,182 DEBUG [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 51ca66f7ee3c%2C37423%2C1731357817233:(num 1731357849722) roll requested 2024-11-11T20:44:21,183 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 51ca66f7ee3c%2C37423%2C1731357817233.1731357861182 2024-11-11T20:44:21,395 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 210 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36315,DS-e0d6e228-645c-44f5-b9f9-97024982914f,DISK], DatanodeInfoWithStorage[127.0.0.1:34943,DS-d17006ea-ccdb-403c-ba2a-171fcf622c2a,DISK]] 2024-11-11T20:44:21,396 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:44:21,396 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:44:21,396 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:44:21,396 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:44:21,397 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:44:21,397 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/WALs/51ca66f7ee3c,37423,1731357817233/51ca66f7ee3c%2C37423%2C1731357817233.1731357849722 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/WALs/51ca66f7ee3c,37423,1731357817233/51ca66f7ee3c%2C37423%2C1731357817233.1731357861182 2024-11-11T20:44:21,398 DEBUG [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44869:44869),(127.0.0.1/127.0.0.1:35551:35551)] 2024-11-11T20:44:21,398 DEBUG [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/WALs/51ca66f7ee3c,37423,1731357817233/51ca66f7ee3c%2C37423%2C1731357817233.1731357849722 is not closed yet, will try archiving it next time 2024-11-11T20:44:21,398 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/WALs/51ca66f7ee3c,37423,1731357817233/51ca66f7ee3c%2C37423%2C1731357817233.1731357829623 to hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/oldWALs/51ca66f7ee3c%2C37423%2C1731357817233.1731357829623 2024-11-11T20:44:21,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34943 is added to blk_1073741839_1015 (size=7739) 2024-11-11T20:44:21,401 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36315 is added to blk_1073741839_1015 (size=7739) 2024-11-11T20:44:23,383 INFO [FSHLog-0-hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1-prefix:51ca66f7ee3c,37423,1731357817233 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36315,DS-e0d6e228-645c-44f5-b9f9-97024982914f,DISK], DatanodeInfoWithStorage[127.0.0.1:34943,DS-d17006ea-ccdb-403c-ba2a-171fcf622c2a,DISK]] 2024-11-11T20:44:24,984 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region bb3997d6cf7f0cd0aa445bde94e43a8a, had cached 0 bytes from a total of 25018 2024-11-11T20:44:25,590 INFO [FSHLog-0-hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1-prefix:51ca66f7ee3c,37423,1731357817233 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36315,DS-e0d6e228-645c-44f5-b9f9-97024982914f,DISK], DatanodeInfoWithStorage[127.0.0.1:34943,DS-d17006ea-ccdb-403c-ba2a-171fcf622c2a,DISK]] 2024-11-11T20:44:27,797 INFO [FSHLog-0-hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1-prefix:51ca66f7ee3c,37423,1731357817233 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36315,DS-e0d6e228-645c-44f5-b9f9-97024982914f,DISK], DatanodeInfoWithStorage[127.0.0.1:34943,DS-d17006ea-ccdb-403c-ba2a-171fcf622c2a,DISK]] 2024-11-11T20:44:30,005 INFO [FSHLog-0-hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1-prefix:51ca66f7ee3c,37423,1731357817233 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36315,DS-e0d6e228-645c-44f5-b9f9-97024982914f,DISK], DatanodeInfoWithStorage[127.0.0.1:34943,DS-d17006ea-ccdb-403c-ba2a-171fcf622c2a,DISK]] 2024-11-11T20:44:32,007 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-11T20:44:32,007 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 51ca66f7ee3c%2C37423%2C1731357817233.1731357872007 2024-11-11T20:44:35,361 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-11T20:44:37,020 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5010 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36315,DS-e0d6e228-645c-44f5-b9f9-97024982914f,DISK], DatanodeInfoWithStorage[127.0.0.1:34943,DS-d17006ea-ccdb-403c-ba2a-171fcf622c2a,DISK]] 2024-11-11T20:44:37,024 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5010 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36315,DS-e0d6e228-645c-44f5-b9f9-97024982914f,DISK], DatanodeInfoWithStorage[127.0.0.1:34943,DS-d17006ea-ccdb-403c-ba2a-171fcf622c2a,DISK]] 2024-11-11T20:44:37,024 DEBUG [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 51ca66f7ee3c%2C37423%2C1731357817233:(num 1731357872007) roll requested 2024-11-11T20:44:37,024 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:44:37,025 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:44:37,025 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:44:37,025 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:44:37,025 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:44:37,025 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/WALs/51ca66f7ee3c,37423,1731357817233/51ca66f7ee3c%2C37423%2C1731357817233.1731357861182 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/WALs/51ca66f7ee3c,37423,1731357817233/51ca66f7ee3c%2C37423%2C1731357817233.1731357872007 2024-11-11T20:44:37,026 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44869:44869),(127.0.0.1/127.0.0.1:35551:35551)] 2024-11-11T20:44:37,026 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/WALs/51ca66f7ee3c,37423,1731357817233/51ca66f7ee3c%2C37423%2C1731357817233.1731357861182 is not closed yet, will try archiving it next time 2024-11-11T20:44:37,026 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 51ca66f7ee3c%2C37423%2C1731357817233.1731357877026 2024-11-11T20:44:37,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34943 is added to blk_1073741841_1017 (size=4753) 2024-11-11T20:44:37,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36315 is added to blk_1073741841_1017 (size=4753) 2024-11-11T20:44:42,030 INFO [FSHLog-0-hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1-prefix:51ca66f7ee3c,37423,1731357817233 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36315,DS-e0d6e228-645c-44f5-b9f9-97024982914f,DISK], DatanodeInfoWithStorage[127.0.0.1:34943,DS-d17006ea-ccdb-403c-ba2a-171fcf622c2a,DISK]] 2024-11-11T20:44:42,030 WARN [FSHLog-0-hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1-prefix:51ca66f7ee3c,37423,1731357817233 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36315,DS-e0d6e228-645c-44f5-b9f9-97024982914f,DISK], 
DatanodeInfoWithStorage[127.0.0.1:34943,DS-d17006ea-ccdb-403c-ba2a-171fcf622c2a,DISK]] 2024-11-11T20:44:42,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37423 {}] regionserver.HRegion(8855): Flush requested on bb3997d6cf7f0cd0aa445bde94e43a8a 2024-11-11T20:44:42,030 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bb3997d6cf7f0cd0aa445bde94e43a8a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-11T20:44:42,037 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5007 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36315,DS-e0d6e228-645c-44f5-b9f9-97024982914f,DISK], DatanodeInfoWithStorage[127.0.0.1:34943,DS-d17006ea-ccdb-403c-ba2a-171fcf622c2a,DISK]] 2024-11-11T20:44:42,037 WARN [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5007 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36315,DS-e0d6e228-645c-44f5-b9f9-97024982914f,DISK], DatanodeInfoWithStorage[127.0.0.1:34943,DS-d17006ea-ccdb-403c-ba2a-171fcf622c2a,DISK]] 2024-11-11T20:44:44,031 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-11T20:44:47,035 INFO [FSHLog-0-hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1-prefix:51ca66f7ee3c,37423,1731357817233 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5002 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36315,DS-e0d6e228-645c-44f5-b9f9-97024982914f,DISK], DatanodeInfoWithStorage[127.0.0.1:34943,DS-d17006ea-ccdb-403c-ba2a-171fcf622c2a,DISK]] 2024-11-11T20:44:47,035 WARN [FSHLog-0-hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1-prefix:51ca66f7ee3c,37423,1731357817233 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5002 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36315,DS-e0d6e228-645c-44f5-b9f9-97024982914f,DISK], DatanodeInfoWithStorage[127.0.0.1:34943,DS-d17006ea-ccdb-403c-ba2a-171fcf622c2a,DISK]] 2024-11-11T20:44:47,035 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:44:47,036 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:44:47,036 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:44:47,037 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:44:47,038 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:44:47,039 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/WALs/51ca66f7ee3c,37423,1731357817233/51ca66f7ee3c%2C37423%2C1731357817233.1731357872007 with entries=2, filesize=1.52 KB; new WAL /user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/WALs/51ca66f7ee3c,37423,1731357817233/51ca66f7ee3c%2C37423%2C1731357817233.1731357877026 2024-11-11T20:44:47,042 DEBUG [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35551:35551),(127.0.0.1/127.0.0.1:44869:44869)] 2024-11-11T20:44:47,042 DEBUG [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/WALs/51ca66f7ee3c,37423,1731357817233/51ca66f7ee3c%2C37423%2C1731357817233.1731357872007 is not closed yet, will try archiving it next time 2024-11-11T20:44:47,042 
DEBUG [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 51ca66f7ee3c%2C37423%2C1731357817233:(num 1731357877026) roll requested 2024-11-11T20:44:47,043 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 51ca66f7ee3c%2C37423%2C1731357817233.1731357887043 2024-11-11T20:44:47,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34943 is added to blk_1073741842_1018 (size=1569) 2024-11-11T20:44:47,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36315 is added to blk_1073741842_1018 (size=1569) 2024-11-11T20:44:47,046 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/default/TestLogRolling-testSlowSyncLogRolling/bb3997d6cf7f0cd0aa445bde94e43a8a/.tmp/info/f77c80bcd58d43ee9ff3ea21e9fd8e09 is 1080, key is row0015/info:/1731357858766/Put/seqid=0 2024-11-11T20:44:47,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36315 is added to blk_1073741844_1020 (size=12509) 2024-11-11T20:44:47,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34943 is added to blk_1073741844_1020 (size=12509) 2024-11-11T20:44:47,055 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/default/TestLogRolling-testSlowSyncLogRolling/bb3997d6cf7f0cd0aa445bde94e43a8a/.tmp/info/f77c80bcd58d43ee9ff3ea21e9fd8e09 2024-11-11T20:44:47,065 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/default/TestLogRolling-testSlowSyncLogRolling/bb3997d6cf7f0cd0aa445bde94e43a8a/.tmp/info/f77c80bcd58d43ee9ff3ea21e9fd8e09 as hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/default/TestLogRolling-testSlowSyncLogRolling/bb3997d6cf7f0cd0aa445bde94e43a8a/info/f77c80bcd58d43ee9ff3ea21e9fd8e09 2024-11-11T20:44:47,074 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/default/TestLogRolling-testSlowSyncLogRolling/bb3997d6cf7f0cd0aa445bde94e43a8a/info/f77c80bcd58d43ee9ff3ea21e9fd8e09, entries=7, sequenceid=31, filesize=12.2 K 2024-11-11T20:44:52,057 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5011 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34943,DS-d17006ea-ccdb-403c-ba2a-171fcf622c2a,DISK], DatanodeInfoWithStorage[127.0.0.1:36315,DS-e0d6e228-645c-44f5-b9f9-97024982914f,DISK]] 2024-11-11T20:44:52,057 WARN [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5011 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34943,DS-d17006ea-ccdb-403c-ba2a-171fcf622c2a,DISK], DatanodeInfoWithStorage[127.0.0.1:36315,DS-e0d6e228-645c-44f5-b9f9-97024982914f,DISK]] 2024-11-11T20:44:52,077 INFO [FSHLog-0-hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1-prefix:51ca66f7ee3c,37423,1731357817233 {}] wal.AbstractFSWAL(1368): 
Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34943,DS-d17006ea-ccdb-403c-ba2a-171fcf622c2a,DISK], DatanodeInfoWithStorage[127.0.0.1:36315,DS-e0d6e228-645c-44f5-b9f9-97024982914f,DISK]] 2024-11-11T20:44:52,077 WARN [FSHLog-0-hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1-prefix:51ca66f7ee3c,37423,1731357817233 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34943,DS-d17006ea-ccdb-403c-ba2a-171fcf622c2a,DISK], DatanodeInfoWithStorage[127.0.0.1:36315,DS-e0d6e228-645c-44f5-b9f9-97024982914f,DISK]] 2024-11-11T20:44:52,077 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:44:52,077 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for bb3997d6cf7f0cd0aa445bde94e43a8a in 10047ms, sequenceid=31, compaction requested=true 2024-11-11T20:44:52,078 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:44:52,078 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bb3997d6cf7f0cd0aa445bde94e43a8a: 2024-11-11T20:44:52,078 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:44:52,078 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-11T20:44:52,078 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:44:52,079 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T20:44:52,079 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:44:52,079 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/default/TestLogRolling-testSlowSyncLogRolling/bb3997d6cf7f0cd0aa445bde94e43a8a/info/2c3eed0610a748c2995bbb2e17029a57 because midkey is the same as first or last row 2024-11-11T20:44:52,079 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/WALs/51ca66f7ee3c,37423,1731357817233/51ca66f7ee3c%2C37423%2C1731357817233.1731357877026 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/WALs/51ca66f7ee3c,37423,1731357817233/51ca66f7ee3c%2C37423%2C1731357817233.1731357887043 2024-11-11T20:44:52,081 DEBUG [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35551:35551),(127.0.0.1/127.0.0.1:44869:44869)] 2024-11-11T20:44:52,081 DEBUG [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/WALs/51ca66f7ee3c,37423,1731357817233/51ca66f7ee3c%2C37423%2C1731357817233.1731357877026 is not closed yet, will try archiving it next time 2024-11-11T20:44:52,081 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/WALs/51ca66f7ee3c,37423,1731357817233/51ca66f7ee3c%2C37423%2C1731357817233.1731357849722 to hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/oldWALs/51ca66f7ee3c%2C37423%2C1731357817233.1731357849722 
2024-11-11T20:44:52,082 DEBUG [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 51ca66f7ee3c%2C37423%2C1731357817233:(num 1731357892082) roll requested 2024-11-11T20:44:52,082 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb3997d6cf7f0cd0aa445bde94e43a8a:info, priority=-2147483648, current under compaction store size is 1 2024-11-11T20:44:52,082 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 51ca66f7ee3c%2C37423%2C1731357817233.1731357892082 2024-11-11T20:44:52,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36315 is added to blk_1073741843_1019 (size=438) 2024-11-11T20:44:52,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34943 is added to blk_1073741843_1019 (size=438) 2024-11-11T20:44:52,085 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/WALs/51ca66f7ee3c,37423,1731357817233/51ca66f7ee3c%2C37423%2C1731357817233.1731357861182 to hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/oldWALs/51ca66f7ee3c%2C37423%2C1731357817233.1731357861182 2024-11-11T20:44:52,086 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T20:44:52,086 DEBUG [RS:0;51ca66f7ee3c:37423-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T20:44:52,087 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/WALs/51ca66f7ee3c,37423,1731357817233/51ca66f7ee3c%2C37423%2C1731357817233.1731357872007 to hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/oldWALs/51ca66f7ee3c%2C37423%2C1731357817233.1731357872007 2024-11-11T20:44:52,089 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/WALs/51ca66f7ee3c,37423,1731357817233/51ca66f7ee3c%2C37423%2C1731357817233.1731357877026 to hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/oldWALs/51ca66f7ee3c%2C37423%2C1731357817233.1731357877026 2024-11-11T20:44:52,089 DEBUG [RS:0;51ca66f7ee3c:37423-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T20:44:52,091 DEBUG [RS:0;51ca66f7ee3c:37423-shortCompactions-0 {}] regionserver.HStore(1541): bb3997d6cf7f0cd0aa445bde94e43a8a/info is initiating minor compaction (all files) 2024-11-11T20:44:52,092 INFO [RS:0;51ca66f7ee3c:37423-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of bb3997d6cf7f0cd0aa445bde94e43a8a/info in TestLogRolling-testSlowSyncLogRolling,,1731357819555.bb3997d6cf7f0cd0aa445bde94e43a8a. 
2024-11-11T20:44:52,092 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:44:52,092 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:44:52,092 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:44:52,093 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:44:52,093 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:44:52,093 INFO [RS:0;51ca66f7ee3c:37423-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/default/TestLogRolling-testSlowSyncLogRolling/bb3997d6cf7f0cd0aa445bde94e43a8a/info/2c3eed0610a748c2995bbb2e17029a57, hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/default/TestLogRolling-testSlowSyncLogRolling/bb3997d6cf7f0cd0aa445bde94e43a8a/info/7b0b66134fa04992a176055fdf0ad76f, hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/default/TestLogRolling-testSlowSyncLogRolling/bb3997d6cf7f0cd0aa445bde94e43a8a/info/f77c80bcd58d43ee9ff3ea21e9fd8e09] into tmpdir=hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/default/TestLogRolling-testSlowSyncLogRolling/bb3997d6cf7f0cd0aa445bde94e43a8a/.tmp, totalSize=36.6 K 2024-11-11T20:44:52,093 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/WALs/51ca66f7ee3c,37423,1731357817233/51ca66f7ee3c%2C37423%2C1731357817233.1731357887043 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/WALs/51ca66f7ee3c,37423,1731357817233/51ca66f7ee3c%2C37423%2C1731357817233.1731357892082 2024-11-11T20:44:52,094 DEBUG [RS:0;51ca66f7ee3c:37423-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2c3eed0610a748c2995bbb2e17029a57, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731357829656 2024-11-11T20:44:52,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36315 is added to blk_1073741845_1021 (size=93) 2024-11-11T20:44:52,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34943 is added to blk_1073741845_1021 (size=93) 2024-11-11T20:44:52,095 DEBUG [RS:0;51ca66f7ee3c:37423-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7b0b66134fa04992a176055fdf0ad76f, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1731357843699 2024-11-11T20:44:52,096 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/WALs/51ca66f7ee3c,37423,1731357817233/51ca66f7ee3c%2C37423%2C1731357817233.1731357887043 to hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/oldWALs/51ca66f7ee3c%2C37423%2C1731357817233.1731357887043 2024-11-11T20:44:52,096 DEBUG [RS:0;51ca66f7ee3c:37423-shortCompactions-0 {}] compactions.Compactor(225): Compacting f77c80bcd58d43ee9ff3ea21e9fd8e09, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1731357858766 2024-11-11T20:44:52,105 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44869:44869),(127.0.0.1/127.0.0.1:35551:35551)] 2024-11-11T20:44:52,106 INFO 
[regionserver/51ca66f7ee3c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 51ca66f7ee3c%2C37423%2C1731357817233.1731357892105 2024-11-11T20:44:52,123 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:44:52,123 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:44:52,123 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:44:52,123 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:44:52,123 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:44:52,124 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/WALs/51ca66f7ee3c,37423,1731357817233/51ca66f7ee3c%2C37423%2C1731357817233.1731357892082 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/WALs/51ca66f7ee3c,37423,1731357817233/51ca66f7ee3c%2C37423%2C1731357817233.1731357892105 2024-11-11T20:44:52,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34943 is added to blk_1073741846_1022 (size=1258) 2024-11-11T20:44:52,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36315 is added to blk_1073741846_1022 (size=1258) 2024-11-11T20:44:52,133 DEBUG [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44869:44869),(127.0.0.1/127.0.0.1:35551:35551)] 2024-11-11T20:44:52,137 INFO [RS:0;51ca66f7ee3c:37423-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb3997d6cf7f0cd0aa445bde94e43a8a#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T20:44:52,138 DEBUG [RS:0;51ca66f7ee3c:37423-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/default/TestLogRolling-testSlowSyncLogRolling/bb3997d6cf7f0cd0aa445bde94e43a8a/.tmp/info/9ecef48ed3084f9ea6189d6b566e9754 is 1080, key is row0001/info:/1731357829656/Put/seqid=0 2024-11-11T20:44:52,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36315 is added to blk_1073741848_1024 (size=27710) 2024-11-11T20:44:52,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34943 is added to blk_1073741848_1024 (size=27710) 2024-11-11T20:44:52,159 DEBUG [RS:0;51ca66f7ee3c:37423-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/default/TestLogRolling-testSlowSyncLogRolling/bb3997d6cf7f0cd0aa445bde94e43a8a/.tmp/info/9ecef48ed3084f9ea6189d6b566e9754 as hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/default/TestLogRolling-testSlowSyncLogRolling/bb3997d6cf7f0cd0aa445bde94e43a8a/info/9ecef48ed3084f9ea6189d6b566e9754 2024-11-11T20:44:52,177 INFO [RS:0;51ca66f7ee3c:37423-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in bb3997d6cf7f0cd0aa445bde94e43a8a/info of bb3997d6cf7f0cd0aa445bde94e43a8a into 9ecef48ed3084f9ea6189d6b566e9754(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T20:44:52,177 DEBUG [RS:0;51ca66f7ee3c:37423-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for bb3997d6cf7f0cd0aa445bde94e43a8a: 2024-11-11T20:44:52,179 INFO [RS:0;51ca66f7ee3c:37423-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1731357819555.bb3997d6cf7f0cd0aa445bde94e43a8a., storeName=bb3997d6cf7f0cd0aa445bde94e43a8a/info, priority=13, startTime=1731357892082; duration=0sec 2024-11-11T20:44:52,179 DEBUG [RS:0;51ca66f7ee3c:37423-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-11T20:44:52,179 DEBUG [RS:0;51ca66f7ee3c:37423-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T20:44:52,179 DEBUG [RS:0;51ca66f7ee3c:37423-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/default/TestLogRolling-testSlowSyncLogRolling/bb3997d6cf7f0cd0aa445bde94e43a8a/info/9ecef48ed3084f9ea6189d6b566e9754 because midkey is the same as first or last row 2024-11-11T20:44:52,180 DEBUG [RS:0;51ca66f7ee3c:37423-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-11T20:44:52,180 DEBUG [RS:0;51ca66f7ee3c:37423-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T20:44:52,180 DEBUG [RS:0;51ca66f7ee3c:37423-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/default/TestLogRolling-testSlowSyncLogRolling/bb3997d6cf7f0cd0aa445bde94e43a8a/info/9ecef48ed3084f9ea6189d6b566e9754 because midkey is the same as first or last row 2024-11-11T20:44:52,180 DEBUG [RS:0;51ca66f7ee3c:37423-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-11T20:44:52,180 DEBUG [RS:0;51ca66f7ee3c:37423-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T20:44:52,180 DEBUG [RS:0;51ca66f7ee3c:37423-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/default/TestLogRolling-testSlowSyncLogRolling/bb3997d6cf7f0cd0aa445bde94e43a8a/info/9ecef48ed3084f9ea6189d6b566e9754 because midkey is the same as first or last row 2024-11-11T20:44:52,180 DEBUG [RS:0;51ca66f7ee3c:37423-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T20:44:52,180 DEBUG [RS:0;51ca66f7ee3c:37423-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb3997d6cf7f0cd0aa445bde94e43a8a:info 2024-11-11T20:45:04,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37423 {}] regionserver.HRegion(8855): Flush requested on bb3997d6cf7f0cd0aa445bde94e43a8a 2024-11-11T20:45:04,152 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bb3997d6cf7f0cd0aa445bde94e43a8a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-11T20:45:04,158 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/default/TestLogRolling-testSlowSyncLogRolling/bb3997d6cf7f0cd0aa445bde94e43a8a/.tmp/info/4e42fa839198421a9012cfbcbf29ad02 is 1080, key is row0022/info:/1731357892107/Put/seqid=0 2024-11-11T20:45:04,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34943 is added to blk_1073741849_1025 (size=12509) 2024-11-11T20:45:04,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36315 is added to blk_1073741849_1025 (size=12509) 2024-11-11T20:45:04,567 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/default/TestLogRolling-testSlowSyncLogRolling/bb3997d6cf7f0cd0aa445bde94e43a8a/.tmp/info/4e42fa839198421a9012cfbcbf29ad02 2024-11-11T20:45:04,580 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/default/TestLogRolling-testSlowSyncLogRolling/bb3997d6cf7f0cd0aa445bde94e43a8a/.tmp/info/4e42fa839198421a9012cfbcbf29ad02 as hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/default/TestLogRolling-testSlowSyncLogRolling/bb3997d6cf7f0cd0aa445bde94e43a8a/info/4e42fa839198421a9012cfbcbf29ad02 2024-11-11T20:45:04,588 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/default/TestLogRolling-testSlowSyncLogRolling/bb3997d6cf7f0cd0aa445bde94e43a8a/info/4e42fa839198421a9012cfbcbf29ad02, entries=7, sequenceid=42, filesize=12.2 K 2024-11-11T20:45:04,590 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for bb3997d6cf7f0cd0aa445bde94e43a8a in 438ms, sequenceid=42, compaction requested=false 2024-11-11T20:45:04,590 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bb3997d6cf7f0cd0aa445bde94e43a8a: 2024-11-11T20:45:04,590 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-11T20:45:04,590 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T20:45:04,590 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/default/TestLogRolling-testSlowSyncLogRolling/bb3997d6cf7f0cd0aa445bde94e43a8a/info/9ecef48ed3084f9ea6189d6b566e9754 because midkey is the same as first or last row 2024-11-11T20:45:05,362 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-11T20:45:09,985 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region bb3997d6cf7f0cd0aa445bde94e43a8a, had cached 0 bytes from a total of 40219 2024-11-11T20:45:12,176 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-11T20:45:12,177 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-11T20:45:12,178 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 
2024-11-11T20:45:12,187 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T20:45:12,187 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T20:45:12,187 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-11T20:45:12,188 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-11T20:45:12,188 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=715057171, stopped=false 2024-11-11T20:45:12,188 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=51ca66f7ee3c,39595,1731357816356 2024-11-11T20:45:12,190 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39595-0x10030870ef70000, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T20:45:12,190 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37423-0x10030870ef70001, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T20:45:12,190 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37423-0x10030870ef70001, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:45:12,190 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39595-0x10030870ef70000, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:45:12,190 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T20:45:12,191 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-11T20:45:12,191 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T20:45:12,191 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T20:45:12,191 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): master:39595-0x10030870ef70000, quorum=127.0.0.1:58576, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T20:45:12,192 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37423-0x10030870ef70001, quorum=127.0.0.1:58576, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T20:45:12,192 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '51ca66f7ee3c,37423,1731357817233' ***** 2024-11-11T20:45:12,192 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-11T20:45:12,192 INFO [RS:0;51ca66f7ee3c:37423 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T20:45:12,193 INFO [RS:0;51ca66f7ee3c:37423 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-11T20:45:12,193 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-11T20:45:12,193 INFO [RS:0;51ca66f7ee3c:37423 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-11T20:45:12,193 INFO [RS:0;51ca66f7ee3c:37423 {}] regionserver.HRegionServer(3091): Received CLOSE for bb3997d6cf7f0cd0aa445bde94e43a8a 2024-11-11T20:45:12,193 INFO [RS:0;51ca66f7ee3c:37423 {}] regionserver.HRegionServer(959): stopping server 51ca66f7ee3c,37423,1731357817233 2024-11-11T20:45:12,194 INFO [RS:0;51ca66f7ee3c:37423 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T20:45:12,194 INFO [RS:0;51ca66f7ee3c:37423 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;51ca66f7ee3c:37423. 2024-11-11T20:45:12,194 DEBUG [RS:0;51ca66f7ee3c:37423 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T20:45:12,194 DEBUG [RS:0;51ca66f7ee3c:37423 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T20:45:12,194 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing bb3997d6cf7f0cd0aa445bde94e43a8a, disabling compactions & flushes 2024-11-11T20:45:12,194 INFO [RS:0;51ca66f7ee3c:37423 {}] 
regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-11T20:45:12,194 INFO [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731357819555.bb3997d6cf7f0cd0aa445bde94e43a8a. 2024-11-11T20:45:12,194 INFO [RS:0;51ca66f7ee3c:37423 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-11T20:45:12,194 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731357819555.bb3997d6cf7f0cd0aa445bde94e43a8a. 2024-11-11T20:45:12,194 INFO [RS:0;51ca66f7ee3c:37423 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-11T20:45:12,194 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731357819555.bb3997d6cf7f0cd0aa445bde94e43a8a. after waiting 0 ms 2024-11-11T20:45:12,194 INFO [RS:0;51ca66f7ee3c:37423 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-11T20:45:12,194 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731357819555.bb3997d6cf7f0cd0aa445bde94e43a8a. 2024-11-11T20:45:12,194 INFO [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing bb3997d6cf7f0cd0aa445bde94e43a8a 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-11T20:45:12,194 INFO [RS:0;51ca66f7ee3c:37423 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-11T20:45:12,194 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T20:45:12,195 DEBUG [RS:0;51ca66f7ee3c:37423 {}] regionserver.HRegionServer(1325): Online Regions={bb3997d6cf7f0cd0aa445bde94e43a8a=TestLogRolling-testSlowSyncLogRolling,,1731357819555.bb3997d6cf7f0cd0aa445bde94e43a8a., 1588230740=hbase:meta,,1.1588230740} 2024-11-11T20:45:12,195 INFO [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T20:45:12,195 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T20:45:12,195 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T20:45:12,195 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T20:45:12,195 DEBUG [RS:0;51ca66f7ee3c:37423 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, bb3997d6cf7f0cd0aa445bde94e43a8a 2024-11-11T20:45:12,195 INFO [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-11T20:45:12,199 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the 
biggest cell in hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/default/TestLogRolling-testSlowSyncLogRolling/bb3997d6cf7f0cd0aa445bde94e43a8a/.tmp/info/af27a849c70d4606a4fcd1cf1775ecd0 is 1080, key is row0029/info:/1731357906156/Put/seqid=0 2024-11-11T20:45:12,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34943 is added to blk_1073741850_1026 (size=8193) 2024-11-11T20:45:12,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36315 is added to blk_1073741850_1026 (size=8193) 2024-11-11T20:45:12,206 INFO [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/default/TestLogRolling-testSlowSyncLogRolling/bb3997d6cf7f0cd0aa445bde94e43a8a/.tmp/info/af27a849c70d4606a4fcd1cf1775ecd0 2024-11-11T20:45:12,215 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/default/TestLogRolling-testSlowSyncLogRolling/bb3997d6cf7f0cd0aa445bde94e43a8a/.tmp/info/af27a849c70d4606a4fcd1cf1775ecd0 as hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/default/TestLogRolling-testSlowSyncLogRolling/bb3997d6cf7f0cd0aa445bde94e43a8a/info/af27a849c70d4606a4fcd1cf1775ecd0 2024-11-11T20:45:12,219 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/hbase/meta/1588230740/.tmp/info/9691ec2a5139418096f4d8cb0ca7a901 is 195, key is TestLogRolling-testSlowSyncLogRolling,,1731357819555.bb3997d6cf7f0cd0aa445bde94e43a8a./info:regioninfo/1731357820033/Put/seqid=0 2024-11-11T20:45:12,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36315 is added to blk_1073741851_1027 (size=7016) 2024-11-11T20:45:12,224 INFO [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/default/TestLogRolling-testSlowSyncLogRolling/bb3997d6cf7f0cd0aa445bde94e43a8a/info/af27a849c70d4606a4fcd1cf1775ecd0, entries=3, sequenceid=48, filesize=8.0 K 2024-11-11T20:45:12,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34943 is added to blk_1073741851_1027 (size=7016) 2024-11-11T20:45:12,225 INFO [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/hbase/meta/1588230740/.tmp/info/9691ec2a5139418096f4d8cb0ca7a901 2024-11-11T20:45:12,226 INFO [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for bb3997d6cf7f0cd0aa445bde94e43a8a in 32ms, sequenceid=48, compaction requested=true 2024-11-11T20:45:12,226 
DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731357819555.bb3997d6cf7f0cd0aa445bde94e43a8a.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/default/TestLogRolling-testSlowSyncLogRolling/bb3997d6cf7f0cd0aa445bde94e43a8a/info/2c3eed0610a748c2995bbb2e17029a57, hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/default/TestLogRolling-testSlowSyncLogRolling/bb3997d6cf7f0cd0aa445bde94e43a8a/info/7b0b66134fa04992a176055fdf0ad76f, hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/default/TestLogRolling-testSlowSyncLogRolling/bb3997d6cf7f0cd0aa445bde94e43a8a/info/f77c80bcd58d43ee9ff3ea21e9fd8e09] to archive 2024-11-11T20:45:12,229 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731357819555.bb3997d6cf7f0cd0aa445bde94e43a8a.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-11T20:45:12,232 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731357819555.bb3997d6cf7f0cd0aa445bde94e43a8a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/default/TestLogRolling-testSlowSyncLogRolling/bb3997d6cf7f0cd0aa445bde94e43a8a/info/2c3eed0610a748c2995bbb2e17029a57 to hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/archive/data/default/TestLogRolling-testSlowSyncLogRolling/bb3997d6cf7f0cd0aa445bde94e43a8a/info/2c3eed0610a748c2995bbb2e17029a57 2024-11-11T20:45:12,235 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731357819555.bb3997d6cf7f0cd0aa445bde94e43a8a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/default/TestLogRolling-testSlowSyncLogRolling/bb3997d6cf7f0cd0aa445bde94e43a8a/info/7b0b66134fa04992a176055fdf0ad76f to hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/archive/data/default/TestLogRolling-testSlowSyncLogRolling/bb3997d6cf7f0cd0aa445bde94e43a8a/info/7b0b66134fa04992a176055fdf0ad76f 2024-11-11T20:45:12,236 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731357819555.bb3997d6cf7f0cd0aa445bde94e43a8a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/default/TestLogRolling-testSlowSyncLogRolling/bb3997d6cf7f0cd0aa445bde94e43a8a/info/f77c80bcd58d43ee9ff3ea21e9fd8e09 to hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/archive/data/default/TestLogRolling-testSlowSyncLogRolling/bb3997d6cf7f0cd0aa445bde94e43a8a/info/f77c80bcd58d43ee9ff3ea21e9fd8e09 2024-11-11T20:45:12,250 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731357819555.bb3997d6cf7f0cd0aa445bde94e43a8a.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=51ca66f7ee3c:39595 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] 
at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-11T20:45:12,255 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731357819555.bb3997d6cf7f0cd0aa445bde94e43a8a.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [2c3eed0610a748c2995bbb2e17029a57=12509, 7b0b66134fa04992a176055fdf0ad76f=12509, f77c80bcd58d43ee9ff3ea21e9fd8e09=12509] 2024-11-11T20:45:12,255 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/hbase/meta/1588230740/.tmp/ns/734492d1ff264674b7782f1ba7e63712 is 43, key is default/ns:d/1731357819336/Put/seqid=0 2024-11-11T20:45:12,263 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/default/TestLogRolling-testSlowSyncLogRolling/bb3997d6cf7f0cd0aa445bde94e43a8a/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-11T20:45:12,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36315 is added to blk_1073741852_1028 (size=5153) 2024-11-11T20:45:12,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34943 is added to blk_1073741852_1028 (size=5153) 2024-11-11T20:45:12,266 INFO [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731357819555.bb3997d6cf7f0cd0aa445bde94e43a8a. 2024-11-11T20:45:12,266 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for bb3997d6cf7f0cd0aa445bde94e43a8a: Waiting for close lock at 1731357912194Running coprocessor pre-close hooks at 1731357912194Disabling compacts and flushes for region at 1731357912194Disabling writes for close at 1731357912194Obtaining lock to block concurrent updates at 1731357912194Preparing flush snapshotting stores in bb3997d6cf7f0cd0aa445bde94e43a8a at 1731357912194Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1731357819555.bb3997d6cf7f0cd0aa445bde94e43a8a., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1731357912195 (+1 ms)Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1731357819555.bb3997d6cf7f0cd0aa445bde94e43a8a. at 1731357912196 (+1 ms)Flushing bb3997d6cf7f0cd0aa445bde94e43a8a/info: creating writer at 1731357912196Flushing bb3997d6cf7f0cd0aa445bde94e43a8a/info: appending metadata at 1731357912199 (+3 ms)Flushing bb3997d6cf7f0cd0aa445bde94e43a8a/info: closing flushed file at 1731357912199Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@741cdd5d: reopening flushed file at 1731357912214 (+15 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for bb3997d6cf7f0cd0aa445bde94e43a8a in 32ms, sequenceid=48, compaction requested=true at 1731357912226 (+12 ms)Writing region close event to WAL at 1731357912256 (+30 ms)Running coprocessor post-close hooks at 1731357912264 (+8 ms)Closed at 1731357912266 (+2 ms) 2024-11-11T20:45:12,267 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1731357819555.bb3997d6cf7f0cd0aa445bde94e43a8a. 
2024-11-11T20:45:12,395 DEBUG [RS:0;51ca66f7ee3c:37423 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-11T20:45:12,550 INFO [regionserver/51ca66f7ee3c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-11T20:45:12,550 INFO [regionserver/51ca66f7ee3c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-11T20:45:12,552 INFO [regionserver/51ca66f7ee3c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T20:45:12,595 DEBUG [RS:0;51ca66f7ee3c:37423 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-11T20:45:12,667 INFO [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/hbase/meta/1588230740/.tmp/ns/734492d1ff264674b7782f1ba7e63712 2024-11-11T20:45:12,698 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/hbase/meta/1588230740/.tmp/table/f55fbe7d008e4548b6b2cef1e1b4c4a8 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1731357820060/Put/seqid=0 2024-11-11T20:45:12,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36315 is added to blk_1073741853_1029 (size=5396) 2024-11-11T20:45:12,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34943 is added to blk_1073741853_1029 (size=5396) 2024-11-11T20:45:12,704 INFO [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/hbase/meta/1588230740/.tmp/table/f55fbe7d008e4548b6b2cef1e1b4c4a8 2024-11-11T20:45:12,712 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/hbase/meta/1588230740/.tmp/info/9691ec2a5139418096f4d8cb0ca7a901 as hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/hbase/meta/1588230740/info/9691ec2a5139418096f4d8cb0ca7a901 2024-11-11T20:45:12,720 INFO [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/hbase/meta/1588230740/info/9691ec2a5139418096f4d8cb0ca7a901, entries=10, sequenceid=11, filesize=6.9 K 2024-11-11T20:45:12,721 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/hbase/meta/1588230740/.tmp/ns/734492d1ff264674b7782f1ba7e63712 as hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/hbase/meta/1588230740/ns/734492d1ff264674b7782f1ba7e63712 2024-11-11T20:45:12,729 INFO [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/hbase/meta/1588230740/ns/734492d1ff264674b7782f1ba7e63712, entries=2, sequenceid=11, filesize=5.0 K 2024-11-11T20:45:12,730 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/hbase/meta/1588230740/.tmp/table/f55fbe7d008e4548b6b2cef1e1b4c4a8 as hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/hbase/meta/1588230740/table/f55fbe7d008e4548b6b2cef1e1b4c4a8 2024-11-11T20:45:12,737 INFO [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/hbase/meta/1588230740/table/f55fbe7d008e4548b6b2cef1e1b4c4a8, entries=2, sequenceid=11, filesize=5.3 K 2024-11-11T20:45:12,739 INFO [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 543ms, sequenceid=11, compaction requested=false 2024-11-11T20:45:12,744 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-11T20:45:12,745 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T20:45:12,745 INFO [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T20:45:12,745 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731357912194Running coprocessor pre-close hooks at 1731357912194Disabling compacts and flushes for region at 1731357912194Disabling writes for close at 1731357912195 (+1 ms)Obtaining lock to block concurrent updates at 1731357912195Preparing flush snapshotting stores in 1588230740 at 1731357912195Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1731357912195Flushing stores of hbase:meta,,1.1588230740 at 1731357912196 (+1 ms)Flushing 1588230740/info: creating writer at 1731357912196Flushing 1588230740/info: appending metadata at 1731357912218 (+22 ms)Flushing 1588230740/info: closing flushed file at 1731357912218Flushing 1588230740/ns: creating writer at 1731357912234 (+16 ms)Flushing 1588230740/ns: appending metadata at 1731357912254 (+20 ms)Flushing 1588230740/ns: closing flushed file at 1731357912254Flushing 1588230740/table: creating writer at 1731357912682 (+428 ms)Flushing 1588230740/table: appending metadata at 1731357912698 (+16 ms)Flushing 1588230740/table: closing flushed file at 1731357912698Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6eab5baa: reopening flushed file at 1731357912711 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@617a1598: reopening flushed file at 1731357912720 (+9 
ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7743d4d4: reopening flushed file at 1731357912729 (+9 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 543ms, sequenceid=11, compaction requested=false at 1731357912739 (+10 ms)Writing region close event to WAL at 1731357912740 (+1 ms)Running coprocessor post-close hooks at 1731357912745 (+5 ms)Closed at 1731357912745 2024-11-11T20:45:12,745 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-11T20:45:12,796 INFO [RS:0;51ca66f7ee3c:37423 {}] regionserver.HRegionServer(976): stopping server 51ca66f7ee3c,37423,1731357817233; all regions closed. 2024-11-11T20:45:12,799 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:12,799 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:12,799 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:12,800 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:12,800 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:12,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34943 is added to blk_1073741834_1010 (size=3066) 2024-11-11T20:45:12,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36315 is added to blk_1073741834_1010 (size=3066) 2024-11-11T20:45:12,811 DEBUG [RS:0;51ca66f7ee3c:37423 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/oldWALs 2024-11-11T20:45:12,811 INFO [RS:0;51ca66f7ee3c:37423 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 51ca66f7ee3c%2C37423%2C1731357817233.meta:.meta(num 1731357819171) 2024-11-11T20:45:12,811 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:12,812 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:12,812 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:12,812 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:12,812 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:12,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34943 is added to blk_1073741847_1023 (size=12695) 2024-11-11T20:45:12,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36315 is added to blk_1073741847_1023 (size=12695) 2024-11-11T20:45:12,818 DEBUG [RS:0;51ca66f7ee3c:37423 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/oldWALs 2024-11-11T20:45:12,818 INFO [RS:0;51ca66f7ee3c:37423 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 51ca66f7ee3c%2C37423%2C1731357817233:(num 1731357892105) 2024-11-11T20:45:12,818 DEBUG [RS:0;51ca66f7ee3c:37423 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T20:45:12,819 INFO [RS:0;51ca66f7ee3c:37423 {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T20:45:12,819 INFO [RS:0;51ca66f7ee3c:37423 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T20:45:12,819 INFO [RS:0;51ca66f7ee3c:37423 {}] hbase.ChoreService(370): Chore service for: regionserver/51ca66f7ee3c:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore 
name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-11T20:45:12,819 INFO [RS:0;51ca66f7ee3c:37423 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T20:45:12,819 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-11T20:45:12,819 INFO [RS:0;51ca66f7ee3c:37423 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37423 2024-11-11T20:45:12,822 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37423-0x10030870ef70001, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/51ca66f7ee3c,37423,1731357817233 2024-11-11T20:45:12,822 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39595-0x10030870ef70000, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T20:45:12,822 INFO [RS:0;51ca66f7ee3c:37423 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T20:45:12,824 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [51ca66f7ee3c,37423,1731357817233] 2024-11-11T20:45:12,825 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/51ca66f7ee3c,37423,1731357817233 already deleted, retry=false 2024-11-11T20:45:12,825 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 51ca66f7ee3c,37423,1731357817233 expired; onlineServers=0 2024-11-11T20:45:12,825 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '51ca66f7ee3c,39595,1731357816356' ***** 2024-11-11T20:45:12,825 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-11T20:45:12,825 INFO [M:0;51ca66f7ee3c:39595 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T20:45:12,825 INFO [M:0;51ca66f7ee3c:39595 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T20:45:12,826 DEBUG [M:0;51ca66f7ee3c:39595 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-11T20:45:12,826 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-11T20:45:12,826 DEBUG [M:0;51ca66f7ee3c:39595 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-11T20:45:12,826 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster-HFileCleaner.small.0-1731357818398 {}] cleaner.HFileCleaner(306): Exit Thread[master/51ca66f7ee3c:0:becomeActiveMaster-HFileCleaner.small.0-1731357818398,5,FailOnTimeoutGroup] 2024-11-11T20:45:12,826 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster-HFileCleaner.large.0-1731357818389 {}] cleaner.HFileCleaner(306): Exit Thread[master/51ca66f7ee3c:0:becomeActiveMaster-HFileCleaner.large.0-1731357818389,5,FailOnTimeoutGroup] 2024-11-11T20:45:12,826 INFO [M:0;51ca66f7ee3c:39595 {}] hbase.ChoreService(370): Chore service for: master/51ca66f7ee3c:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-11T20:45:12,826 INFO [M:0;51ca66f7ee3c:39595 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T20:45:12,826 DEBUG [M:0;51ca66f7ee3c:39595 {}] master.HMaster(1795): Stopping service threads 2024-11-11T20:45:12,826 INFO [M:0;51ca66f7ee3c:39595 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-11T20:45:12,826 INFO [M:0;51ca66f7ee3c:39595 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T20:45:12,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39595-0x10030870ef70000, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-11T20:45:12,827 INFO [M:0;51ca66f7ee3c:39595 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-11T20:45:12,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39595-0x10030870ef70000, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:45:12,827 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-11T20:45:12,827 DEBUG [M:0;51ca66f7ee3c:39595 {}] zookeeper.ZKUtil(347): master:39595-0x10030870ef70000, quorum=127.0.0.1:58576, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-11T20:45:12,827 WARN [M:0;51ca66f7ee3c:39595 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-11T20:45:12,828 INFO [M:0;51ca66f7ee3c:39595 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/.lastflushedseqids 2024-11-11T20:45:12,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36315 is added to blk_1073741854_1030 (size=130) 2024-11-11T20:45:12,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34943 is added to blk_1073741854_1030 (size=130) 2024-11-11T20:45:12,840 INFO [M:0;51ca66f7ee3c:39595 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-11T20:45:12,840 INFO [M:0;51ca66f7ee3c:39595 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-11T20:45:12,840 DEBUG [M:0;51ca66f7ee3c:39595 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T20:45:12,840 INFO [M:0;51ca66f7ee3c:39595 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T20:45:12,840 DEBUG [M:0;51ca66f7ee3c:39595 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T20:45:12,840 DEBUG [M:0;51ca66f7ee3c:39595 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T20:45:12,840 DEBUG [M:0;51ca66f7ee3c:39595 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-11T20:45:12,841 INFO [M:0;51ca66f7ee3c:39595 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.02 KB heapSize=29.20 KB 2024-11-11T20:45:12,858 DEBUG [M:0;51ca66f7ee3c:39595 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/283b1f6009324bbf8a8659950aa4d79e is 82, key is hbase:meta,,1/info:regioninfo/1731357819265/Put/seqid=0 2024-11-11T20:45:12,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36315 is added to blk_1073741855_1031 (size=5672) 2024-11-11T20:45:12,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34943 is added to blk_1073741855_1031 (size=5672) 2024-11-11T20:45:12,865 INFO [M:0;51ca66f7ee3c:39595 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/283b1f6009324bbf8a8659950aa4d79e 2024-11-11T20:45:12,886 DEBUG [M:0;51ca66f7ee3c:39595 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ed4b31cba2f04da1928917a92971861d is 766, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731357820071/Put/seqid=0 2024-11-11T20:45:12,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34943 is added to blk_1073741856_1032 (size=6247) 2024-11-11T20:45:12,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36315 is added to blk_1073741856_1032 (size=6247) 2024-11-11T20:45:12,892 INFO [M:0;51ca66f7ee3c:39595 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.42 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ed4b31cba2f04da1928917a92971861d 2024-11-11T20:45:12,898 INFO [M:0;51ca66f7ee3c:39595 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for ed4b31cba2f04da1928917a92971861d 2024-11-11T20:45:12,914 DEBUG [M:0;51ca66f7ee3c:39595 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/33e6760ccf544baab2b94f7e6ae89050 is 69, key is 51ca66f7ee3c,37423,1731357817233/rs:state/1731357818436/Put/seqid=0 2024-11-11T20:45:12,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36315 is added to blk_1073741857_1033 (size=5156) 2024-11-11T20:45:12,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34943 is added to blk_1073741857_1033 (size=5156) 2024-11-11T20:45:12,920 INFO [M:0;51ca66f7ee3c:39595 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), 
to=hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/33e6760ccf544baab2b94f7e6ae89050 2024-11-11T20:45:12,924 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37423-0x10030870ef70001, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T20:45:12,924 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37423-0x10030870ef70001, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T20:45:12,925 INFO [RS:0;51ca66f7ee3c:37423 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T20:45:12,925 INFO [RS:0;51ca66f7ee3c:37423 {}] regionserver.HRegionServer(1031): Exiting; stopping=51ca66f7ee3c,37423,1731357817233; zookeeper connection closed. 2024-11-11T20:45:12,925 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5b8077ad {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5b8077ad 2024-11-11T20:45:12,926 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-11T20:45:12,941 DEBUG [M:0;51ca66f7ee3c:39595 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8e40afbc5c1b42008ef8b6a9e8d9b56f is 52, key is load_balancer_on/state:d/1731357819530/Put/seqid=0 2024-11-11T20:45:12,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36315 is added to blk_1073741858_1034 (size=5056) 2024-11-11T20:45:12,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34943 is added to blk_1073741858_1034 (size=5056) 2024-11-11T20:45:12,948 INFO [M:0;51ca66f7ee3c:39595 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8e40afbc5c1b42008ef8b6a9e8d9b56f 2024-11-11T20:45:12,954 DEBUG [M:0;51ca66f7ee3c:39595 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/283b1f6009324bbf8a8659950aa4d79e as hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/283b1f6009324bbf8a8659950aa4d79e 2024-11-11T20:45:12,961 INFO [M:0;51ca66f7ee3c:39595 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/283b1f6009324bbf8a8659950aa4d79e, entries=8, sequenceid=59, filesize=5.5 K 2024-11-11T20:45:12,962 DEBUG [M:0;51ca66f7ee3c:39595 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ed4b31cba2f04da1928917a92971861d as 
hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ed4b31cba2f04da1928917a92971861d 2024-11-11T20:45:12,969 INFO [M:0;51ca66f7ee3c:39595 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for ed4b31cba2f04da1928917a92971861d 2024-11-11T20:45:12,969 INFO [M:0;51ca66f7ee3c:39595 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ed4b31cba2f04da1928917a92971861d, entries=6, sequenceid=59, filesize=6.1 K 2024-11-11T20:45:12,971 DEBUG [M:0;51ca66f7ee3c:39595 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/33e6760ccf544baab2b94f7e6ae89050 as hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/33e6760ccf544baab2b94f7e6ae89050 2024-11-11T20:45:12,977 INFO [M:0;51ca66f7ee3c:39595 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/33e6760ccf544baab2b94f7e6ae89050, entries=1, sequenceid=59, filesize=5.0 K 2024-11-11T20:45:12,978 DEBUG [M:0;51ca66f7ee3c:39595 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8e40afbc5c1b42008ef8b6a9e8d9b56f as hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/8e40afbc5c1b42008ef8b6a9e8d9b56f 2024-11-11T20:45:12,985 INFO [M:0;51ca66f7ee3c:39595 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/8e40afbc5c1b42008ef8b6a9e8d9b56f, entries=1, sequenceid=59, filesize=4.9 K 2024-11-11T20:45:12,986 INFO [M:0;51ca66f7ee3c:39595 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 145ms, sequenceid=59, compaction requested=false 2024-11-11T20:45:12,988 INFO [M:0;51ca66f7ee3c:39595 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T20:45:12,988 DEBUG [M:0;51ca66f7ee3c:39595 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731357912840Disabling compacts and flushes for region at 1731357912840Disabling writes for close at 1731357912840Obtaining lock to block concurrent updates at 1731357912841 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731357912841Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23576, getHeapSize=29840, getOffHeapSize=0, getCellsCount=70 at 1731357912841Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731357912842 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731357912842Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731357912858 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731357912858Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731357912871 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731357912885 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731357912885Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731357912898 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731357912913 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731357912913Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731357912927 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731357912940 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731357912940Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3bd6ae59: reopening flushed file at 1731357912953 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@32ecfadf: reopening flushed file at 1731357912961 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6ddb0108: reopening flushed file at 1731357912970 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@68a97e38: reopening flushed file at 1731357912977 (+7 ms)Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 145ms, sequenceid=59, compaction requested=false at 1731357912986 (+9 ms)Writing region close event to WAL at 1731357912988 (+2 ms)Closed at 1731357912988 2024-11-11T20:45:12,989 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:12,989 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:12,989 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:12,989 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:12,989 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:12,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36315 is added to blk_1073741830_1006 (size=27973) 2024-11-11T20:45:12,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34943 is added to blk_1073741830_1006 (size=27973) 2024-11-11T20:45:12,992 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-11T20:45:12,992 INFO [M:0;51ca66f7ee3c:39595 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-11T20:45:12,992 INFO [M:0;51ca66f7ee3c:39595 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39595 2024-11-11T20:45:12,992 INFO [M:0;51ca66f7ee3c:39595 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T20:45:13,094 INFO [M:0;51ca66f7ee3c:39595 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T20:45:13,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39595-0x10030870ef70000, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T20:45:13,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39595-0x10030870ef70000, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T20:45:13,098 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bf97579{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T20:45:13,100 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T20:45:13,100 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T20:45:13,100 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T20:45:13,100 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cf855f3-5a89-7dc7-3541-2e0c179ae50d/hadoop.log.dir/,STOPPED} 2024-11-11T20:45:13,103 WARN [BP-2014057948-172.17.0.2-1731357813118 heartbeating to localhost/127.0.0.1:43051 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T20:45:13,103 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T20:45:13,103 WARN [BP-2014057948-172.17.0.2-1731357813118 heartbeating to localhost/127.0.0.1:43051 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2014057948-172.17.0.2-1731357813118 (Datanode Uuid f5687c13-e644-47f6-9438-ade2898dfaff) service to localhost/127.0.0.1:43051 2024-11-11T20:45:13,103 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T20:45:13,105 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cf855f3-5a89-7dc7-3541-2e0c179ae50d/cluster_9b6cc4bb-4f3e-38bf-dbe5-5cf01a4f98fe/data/data3/current/BP-2014057948-172.17.0.2-1731357813118 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T20:45:13,105 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cf855f3-5a89-7dc7-3541-2e0c179ae50d/cluster_9b6cc4bb-4f3e-38bf-dbe5-5cf01a4f98fe/data/data4/current/BP-2014057948-172.17.0.2-1731357813118 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T20:45:13,105 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T20:45:13,107 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b07d1ba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T20:45:13,108 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T20:45:13,108 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T20:45:13,108 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T20:45:13,108 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cf855f3-5a89-7dc7-3541-2e0c179ae50d/hadoop.log.dir/,STOPPED} 2024-11-11T20:45:13,109 WARN [BP-2014057948-172.17.0.2-1731357813118 heartbeating to localhost/127.0.0.1:43051 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T20:45:13,109 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T20:45:13,109 WARN [BP-2014057948-172.17.0.2-1731357813118 heartbeating to localhost/127.0.0.1:43051 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2014057948-172.17.0.2-1731357813118 (Datanode Uuid 820ed1f1-7b25-47ed-9c76-4fffdf22c9c0) service to localhost/127.0.0.1:43051 2024-11-11T20:45:13,109 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T20:45:13,110 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cf855f3-5a89-7dc7-3541-2e0c179ae50d/cluster_9b6cc4bb-4f3e-38bf-dbe5-5cf01a4f98fe/data/data1/current/BP-2014057948-172.17.0.2-1731357813118 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T20:45:13,110 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cf855f3-5a89-7dc7-3541-2e0c179ae50d/cluster_9b6cc4bb-4f3e-38bf-dbe5-5cf01a4f98fe/data/data2/current/BP-2014057948-172.17.0.2-1731357813118 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T20:45:13,111 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T20:45:13,118 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T20:45:13,119 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T20:45:13,119 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T20:45:13,119 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T20:45:13,119 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cf855f3-5a89-7dc7-3541-2e0c179ae50d/hadoop.log.dir/,STOPPED} 2024-11-11T20:45:13,126 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-11T20:45:13,156 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-11T20:45:13,166 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=77 (was 12) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43051 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:43051 java.base@17.0.11/java.lang.Thread.sleep(Native 
Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43051 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: LeaseRenewer:jenkins@localhost:43051 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/51ca66f7ee3c:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:43051 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43051 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/51ca66f7ee3c:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@2a3f5a0d java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:43051 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:43051 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/51ca66f7ee3c:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SnapshotHandlerChoreCleaner 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) - Thread LEAK? -, OpenFileDescriptor=404 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=154 (was 278), ProcessCount=11 (was 11), AvailableMemoryMB=5063 (was 5924) 2024-11-11T20:45:13,173 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=78, OpenFileDescriptor=404, MaxFileDescriptor=1048576, SystemLoadAverage=154, ProcessCount=11, AvailableMemoryMB=5062 2024-11-11T20:45:13,173 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-11T20:45:13,174 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cf855f3-5a89-7dc7-3541-2e0c179ae50d/hadoop.log.dir so I do NOT create it in target/test-data/9cd42516-19a5-4c20-b0a9-2c6aa50c92a6 2024-11-11T20:45:13,174 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cf855f3-5a89-7dc7-3541-2e0c179ae50d/hadoop.tmp.dir so I do NOT create it in target/test-data/9cd42516-19a5-4c20-b0a9-2c6aa50c92a6 2024-11-11T20:45:13,174 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cd42516-19a5-4c20-b0a9-2c6aa50c92a6/cluster_b47fbee1-c06d-8a13-d3fd-697288e507a4, deleteOnExit=true 2024-11-11T20:45:13,174 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-11T20:45:13,174 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cd42516-19a5-4c20-b0a9-2c6aa50c92a6/test.cache.data in system properties and HBase conf 2024-11-11T20:45:13,174 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cd42516-19a5-4c20-b0a9-2c6aa50c92a6/hadoop.tmp.dir in system properties and HBase conf 2024-11-11T20:45:13,174 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cd42516-19a5-4c20-b0a9-2c6aa50c92a6/hadoop.log.dir in 
system properties and HBase conf 2024-11-11T20:45:13,175 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cd42516-19a5-4c20-b0a9-2c6aa50c92a6/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-11T20:45:13,175 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cd42516-19a5-4c20-b0a9-2c6aa50c92a6/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-11T20:45:13,175 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-11T20:45:13,175 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-11T20:45:13,175 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cd42516-19a5-4c20-b0a9-2c6aa50c92a6/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-11T20:45:13,175 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cd42516-19a5-4c20-b0a9-2c6aa50c92a6/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-11T20:45:13,175 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cd42516-19a5-4c20-b0a9-2c6aa50c92a6/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-11T20:45:13,175 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cd42516-19a5-4c20-b0a9-2c6aa50c92a6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T20:45:13,175 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cd42516-19a5-4c20-b0a9-2c6aa50c92a6/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-11T20:45:13,175 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cd42516-19a5-4c20-b0a9-2c6aa50c92a6/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-11T20:45:13,175 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cd42516-19a5-4c20-b0a9-2c6aa50c92a6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T20:45:13,176 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cd42516-19a5-4c20-b0a9-2c6aa50c92a6/dfs.journalnode.edits.dir 
in system properties and HBase conf 2024-11-11T20:45:13,176 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cd42516-19a5-4c20-b0a9-2c6aa50c92a6/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-11T20:45:13,176 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cd42516-19a5-4c20-b0a9-2c6aa50c92a6/nfs.dump.dir in system properties and HBase conf 2024-11-11T20:45:13,176 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cd42516-19a5-4c20-b0a9-2c6aa50c92a6/java.io.tmpdir in system properties and HBase conf 2024-11-11T20:45:13,176 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cd42516-19a5-4c20-b0a9-2c6aa50c92a6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T20:45:13,176 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cd42516-19a5-4c20-b0a9-2c6aa50c92a6/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-11T20:45:13,176 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cd42516-19a5-4c20-b0a9-2c6aa50c92a6/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-11T20:45:13,189 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-11T20:45:13,242 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T20:45:13,249 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T20:45:13,250 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T20:45:13,250 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T20:45:13,250 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T20:45:13,251 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T20:45:13,251 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@534816f2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cd42516-19a5-4c20-b0a9-2c6aa50c92a6/hadoop.log.dir/,AVAILABLE} 2024-11-11T20:45:13,252 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3bd9c5b4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T20:45:13,346 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@14a3b236{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cd42516-19a5-4c20-b0a9-2c6aa50c92a6/java.io.tmpdir/jetty-localhost-39569-hadoop-hdfs-3_4_1-tests_jar-_-any-7563883945393818308/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T20:45:13,346 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@630e1a46{HTTP/1.1, (http/1.1)}{localhost:39569} 2024-11-11T20:45:13,347 INFO [Time-limited test {}] server.Server(415): Started @102571ms 2024-11-11T20:45:13,358 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-11T20:45:13,410 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T20:45:13,414 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T20:45:13,415 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T20:45:13,415 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T20:45:13,415 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-11T20:45:13,415 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@68b9cf2c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cd42516-19a5-4c20-b0a9-2c6aa50c92a6/hadoop.log.dir/,AVAILABLE} 2024-11-11T20:45:13,416 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@55f7876e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T20:45:13,510 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2f841e9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cd42516-19a5-4c20-b0a9-2c6aa50c92a6/java.io.tmpdir/jetty-localhost-37371-hadoop-hdfs-3_4_1-tests_jar-_-any-4557299662688118234/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T20:45:13,511 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3270c9ae{HTTP/1.1, (http/1.1)}{localhost:37371} 2024-11-11T20:45:13,511 INFO [Time-limited test {}] server.Server(415): Started @102735ms 2024-11-11T20:45:13,512 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T20:45:13,545 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T20:45:13,551 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T20:45:13,552 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T20:45:13,552 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T20:45:13,552 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T20:45:13,553 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4da0f787{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cd42516-19a5-4c20-b0a9-2c6aa50c92a6/hadoop.log.dir/,AVAILABLE} 2024-11-11T20:45:13,553 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@463a48f5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T20:45:13,579 WARN [Thread-436 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cd42516-19a5-4c20-b0a9-2c6aa50c92a6/cluster_b47fbee1-c06d-8a13-d3fd-697288e507a4/data/data1/current/BP-403757501-172.17.0.2-1731357913200/current, will proceed with Du for space computation calculation, 2024-11-11T20:45:13,580 WARN [Thread-437 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cd42516-19a5-4c20-b0a9-2c6aa50c92a6/cluster_b47fbee1-c06d-8a13-d3fd-697288e507a4/data/data2/current/BP-403757501-172.17.0.2-1731357913200/current, will proceed with Du for space computation calculation, 2024-11-11T20:45:13,601 WARN [Thread-415 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T20:45:13,605 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb7d967c73ff0b22d with lease ID 0x1dd92465b45b3da4: Processing first storage report for DS-fbd04425-82bf-41a9-b257-8db5031b82c1 from datanode DatanodeRegistration(127.0.0.1:35321, datanodeUuid=c7c349b1-0868-4a48-9c39-432e1e18b88d, infoPort=42837, infoSecurePort=0, ipcPort=37877, storageInfo=lv=-57;cid=testClusterID;nsid=1848861161;c=1731357913200) 2024-11-11T20:45:13,605 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb7d967c73ff0b22d with lease ID 0x1dd92465b45b3da4: from storage DS-fbd04425-82bf-41a9-b257-8db5031b82c1 node DatanodeRegistration(127.0.0.1:35321, datanodeUuid=c7c349b1-0868-4a48-9c39-432e1e18b88d, infoPort=42837, infoSecurePort=0, ipcPort=37877, storageInfo=lv=-57;cid=testClusterID;nsid=1848861161;c=1731357913200), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T20:45:13,605 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb7d967c73ff0b22d with lease ID 0x1dd92465b45b3da4: Processing first storage report for DS-704acf0b-2157-46fd-a5c8-8d08846b0afd from datanode DatanodeRegistration(127.0.0.1:35321, datanodeUuid=c7c349b1-0868-4a48-9c39-432e1e18b88d, infoPort=42837, infoSecurePort=0, ipcPort=37877, storageInfo=lv=-57;cid=testClusterID;nsid=1848861161;c=1731357913200) 2024-11-11T20:45:13,605 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb7d967c73ff0b22d with lease ID 0x1dd92465b45b3da4: from storage DS-704acf0b-2157-46fd-a5c8-8d08846b0afd node DatanodeRegistration(127.0.0.1:35321, datanodeUuid=c7c349b1-0868-4a48-9c39-432e1e18b88d, infoPort=42837, infoSecurePort=0, ipcPort=37877, storageInfo=lv=-57;cid=testClusterID;nsid=1848861161;c=1731357913200), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T20:45:13,656 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@788d8945{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cd42516-19a5-4c20-b0a9-2c6aa50c92a6/java.io.tmpdir/jetty-localhost-43389-hadoop-hdfs-3_4_1-tests_jar-_-any-2457775448890266738/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T20:45:13,657 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3c30f553{HTTP/1.1, (http/1.1)}{localhost:43389} 2024-11-11T20:45:13,657 INFO [Time-limited test {}] server.Server(415): Started @102881ms 2024-11-11T20:45:13,658 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
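A minimal sketch, not taken from this test run, of setting the key named in the DirectoryScanner warning above to a value the datanode accepts: the warning fires when dfs.datanode.directoryscan.throttle.limit.ms.per.sec is above 1000 ms/sec, and the datanode then assumes the default of -1 (no throttling). The value 500 below is an arbitrary example.

import org.apache.hadoop.conf.Configuration;

public class DirectoryScannerThrottleSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Any value in 1..1000, or -1 to disable throttling, avoids the warning above.
    conf.setInt("dfs.datanode.directoryscan.throttle.limit.ms.per.sec", 500);
    System.out.println(conf.getInt("dfs.datanode.directoryscan.throttle.limit.ms.per.sec", -1));
  }
}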
2024-11-11T20:45:13,724 WARN [Thread-462 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cd42516-19a5-4c20-b0a9-2c6aa50c92a6/cluster_b47fbee1-c06d-8a13-d3fd-697288e507a4/data/data3/current/BP-403757501-172.17.0.2-1731357913200/current, will proceed with Du for space computation calculation, 2024-11-11T20:45:13,724 WARN [Thread-463 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cd42516-19a5-4c20-b0a9-2c6aa50c92a6/cluster_b47fbee1-c06d-8a13-d3fd-697288e507a4/data/data4/current/BP-403757501-172.17.0.2-1731357913200/current, will proceed with Du for space computation calculation, 2024-11-11T20:45:13,743 WARN [Thread-451 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-11T20:45:13,745 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x779a824e29903b6 with lease ID 0x1dd92465b45b3da5: Processing first storage report for DS-dde7e985-6513-4264-a888-ac6193bc719c from datanode DatanodeRegistration(127.0.0.1:41057, datanodeUuid=fef661c1-7bfe-4486-bd37-b4e67335c2b1, infoPort=34335, infoSecurePort=0, ipcPort=35085, storageInfo=lv=-57;cid=testClusterID;nsid=1848861161;c=1731357913200) 2024-11-11T20:45:13,746 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x779a824e29903b6 with lease ID 0x1dd92465b45b3da5: from storage DS-dde7e985-6513-4264-a888-ac6193bc719c node DatanodeRegistration(127.0.0.1:41057, datanodeUuid=fef661c1-7bfe-4486-bd37-b4e67335c2b1, infoPort=34335, infoSecurePort=0, ipcPort=35085, storageInfo=lv=-57;cid=testClusterID;nsid=1848861161;c=1731357913200), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T20:45:13,746 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x779a824e29903b6 with lease ID 0x1dd92465b45b3da5: Processing first storage report for DS-67b89cbf-a446-4c3e-878b-afb8343fdb39 from datanode DatanodeRegistration(127.0.0.1:41057, datanodeUuid=fef661c1-7bfe-4486-bd37-b4e67335c2b1, infoPort=34335, infoSecurePort=0, ipcPort=35085, storageInfo=lv=-57;cid=testClusterID;nsid=1848861161;c=1731357913200) 2024-11-11T20:45:13,746 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x779a824e29903b6 with lease ID 0x1dd92465b45b3da5: from storage DS-67b89cbf-a446-4c3e-878b-afb8343fdb39 node DatanodeRegistration(127.0.0.1:41057, datanodeUuid=fef661c1-7bfe-4486-bd37-b4e67335c2b1, infoPort=34335, infoSecurePort=0, ipcPort=35085, storageInfo=lv=-57;cid=testClusterID;nsid=1848861161;c=1731357913200), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-11T20:45:13,790 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cd42516-19a5-4c20-b0a9-2c6aa50c92a6 2024-11-11T20:45:13,796 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cd42516-19a5-4c20-b0a9-2c6aa50c92a6/cluster_b47fbee1-c06d-8a13-d3fd-697288e507a4/zookeeper_0, clientPort=59235, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cd42516-19a5-4c20-b0a9-2c6aa50c92a6/cluster_b47fbee1-c06d-8a13-d3fd-697288e507a4/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cd42516-19a5-4c20-b0a9-2c6aa50c92a6/cluster_b47fbee1-c06d-8a13-d3fd-697288e507a4/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-11T20:45:13,797 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59235 2024-11-11T20:45:13,797 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:45:13,799 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:45:13,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35321 is added to blk_1073741825_1001 (size=7) 2024-11-11T20:45:13,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41057 is added to blk_1073741825_1001 (size=7) 2024-11-11T20:45:13,811 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795 with version=8 2024-11-11T20:45:13,811 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/hbase-staging 2024-11-11T20:45:13,814 INFO [Time-limited test {}] client.ConnectionUtils(128): master/51ca66f7ee3c:0 server-side Connection retries=45 2024-11-11T20:45:13,814 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T20:45:13,814 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T20:45:13,814 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T20:45:13,814 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T20:45:13,814 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T20:45:13,814 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-11T20:45:13,814 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T20:45:13,815 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43673 2024-11-11T20:45:13,816 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:43673 connecting to ZooKeeper ensemble=127.0.0.1:59235 2024-11-11T20:45:13,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:436730x0, quorum=127.0.0.1:59235, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T20:45:13,821 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:43673-0x10030888f300000 connected 2024-11-11T20:45:13,831 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:45:13,833 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:45:13,836 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43673-0x10030888f300000, quorum=127.0.0.1:59235, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T20:45:13,836 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795, hbase.cluster.distributed=false 2024-11-11T20:45:13,838 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43673-0x10030888f300000, quorum=127.0.0.1:59235, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T20:45:13,839 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43673 2024-11-11T20:45:13,839 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43673 2024-11-11T20:45:13,839 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43673 2024-11-11T20:45:13,840 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43673 2024-11-11T20:45:13,840 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43673 2024-11-11T20:45:13,861 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/51ca66f7ee3c:0 server-side Connection retries=45 2024-11-11T20:45:13,861 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T20:45:13,862 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T20:45:13,862 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T20:45:13,862 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T20:45:13,862 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T20:45:13,862 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-11T20:45:13,862 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T20:45:13,862 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39513 2024-11-11T20:45:13,865 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39513 connecting to ZooKeeper ensemble=127.0.0.1:59235 2024-11-11T20:45:13,866 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:45:13,868 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:45:13,872 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:395130x0, quorum=127.0.0.1:59235, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T20:45:13,873 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39513-0x10030888f300001 connected 2024-11-11T20:45:13,873 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39513-0x10030888f300001, quorum=127.0.0.1:59235, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T20:45:13,873 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-11T20:45:13,877 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-11T20:45:13,878 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39513-0x10030888f300001, quorum=127.0.0.1:59235, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-11T20:45:13,880 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39513-0x10030888f300001, quorum=127.0.0.1:59235, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T20:45:13,881 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39513 2024-11-11T20:45:13,883 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39513 2024-11-11T20:45:13,885 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39513 2024-11-11T20:45:13,888 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39513 2024-11-11T20:45:13,893 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39513 
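The RPC executors above are instantiated with handlerCount=3 and the region server allocates an 880 MB block cache. A hedged sketch of the standard keys that drive those numbers; the keys are stock HBase configuration, but whether this particular test sets them, and to what values, is not shown in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RegionServerTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.regionserver.handler.count", 3); // RPC handlers, as in the executors above
    conf.setFloat("hfile.block.cache.size", 0.4f);      // fraction of heap given to the block cache
    System.out.println(conf.getInt("hbase.regionserver.handler.count", 30));
  }
}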
2024-11-11T20:45:13,912 DEBUG [M:0;51ca66f7ee3c:43673 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;51ca66f7ee3c:43673 2024-11-11T20:45:13,913 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/51ca66f7ee3c,43673,1731357913813 2024-11-11T20:45:13,915 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43673-0x10030888f300000, quorum=127.0.0.1:59235, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T20:45:13,915 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39513-0x10030888f300001, quorum=127.0.0.1:59235, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T20:45:13,917 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43673-0x10030888f300000, quorum=127.0.0.1:59235, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/51ca66f7ee3c,43673,1731357913813 2024-11-11T20:45:13,919 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39513-0x10030888f300001, quorum=127.0.0.1:59235, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-11T20:45:13,919 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39513-0x10030888f300001, quorum=127.0.0.1:59235, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:45:13,919 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43673-0x10030888f300000, quorum=127.0.0.1:59235, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:45:13,922 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43673-0x10030888f300000, quorum=127.0.0.1:59235, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-11T20:45:13,922 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/51ca66f7ee3c,43673,1731357913813 from backup master directory 2024-11-11T20:45:13,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43673-0x10030888f300000, quorum=127.0.0.1:59235, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/51ca66f7ee3c,43673,1731357913813 2024-11-11T20:45:13,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39513-0x10030888f300001, quorum=127.0.0.1:59235, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T20:45:13,923 WARN [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
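Both the master (port 43673) and the region server (port 39513) register with the ZooKeeper ensemble at 127.0.0.1:59235. A sketch, assuming a plain client outside the test harness, of pointing an HBase connection at that same ensemble; error handling is omitted.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class MiniClusterClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "59235"); // client port from the log above
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      System.out.println("connected: " + !connection.isClosed());
    }
  }
}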
2024-11-11T20:45:13,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43673-0x10030888f300000, quorum=127.0.0.1:59235, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T20:45:13,924 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=51ca66f7ee3c,43673,1731357913813 2024-11-11T20:45:13,929 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/hbase.id] with ID: 16c801ff-103e-44b2-949a-6d56d3c6509a 2024-11-11T20:45:13,929 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/.tmp/hbase.id 2024-11-11T20:45:13,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41057 is added to blk_1073741826_1002 (size=42) 2024-11-11T20:45:13,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35321 is added to blk_1073741826_1002 (size=42) 2024-11-11T20:45:13,945 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/.tmp/hbase.id]:[hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/hbase.id] 2024-11-11T20:45:13,965 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:45:13,965 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-11T20:45:13,968 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 3ms. 
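The lines above show the test harness (logged as HBaseTestingUtil) electing an active master and writing the cluster ID file hbase.id. A sketch of the usual mini-cluster lifecycle around such a run; it uses the long-standing HBaseTestingUtility API, so the class and method names are assumptions relative to the renamed HBaseTestingUtil seen in this log.

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.client.Admin;

public class MiniClusterLifecycleSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster(); // brings up HDFS, ZooKeeper, a master and a region server
    try (Admin admin = util.getAdmin()) {
      // The id printed here is the one persisted in the hbase.id file created above.
      System.out.println("cluster id: " + admin.getClusterMetrics().getClusterId());
    } finally {
      util.shutdownMiniCluster();
    }
  }
}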
2024-11-11T20:45:13,971 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39513-0x10030888f300001, quorum=127.0.0.1:59235, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:45:13,971 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43673-0x10030888f300000, quorum=127.0.0.1:59235, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:45:13,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35321 is added to blk_1073741827_1003 (size=196) 2024-11-11T20:45:13,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41057 is added to blk_1073741827_1003 (size=196) 2024-11-11T20:45:13,990 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T20:45:13,991 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-11T20:45:13,992 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T20:45:14,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35321 is added to blk_1073741828_1004 (size=1189) 2024-11-11T20:45:14,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41057 is added to blk_1073741828_1004 (size=1189) 2024-11-11T20:45:14,007 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/MasterData/data/master/store 2024-11-11T20:45:14,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41057 is added to blk_1073741829_1005 (size=34) 2024-11-11T20:45:14,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35321 is added to blk_1073741829_1005 (size=34) 2024-11-11T20:45:14,020 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T20:45:14,020 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T20:45:14,020 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T20:45:14,021 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T20:45:14,021 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T20:45:14,021 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T20:45:14,021 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
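The 'info' family of master:store is printed above with VERSIONS => '3', IN_MEMORY => 'true', ROW_INDEX_V1 encoding, a ROWCOL bloom filter, and 8 KB blocks. A sketch, not taken from the HBase source, of expressing the same attributes through the public descriptor builders; the table name "demo" is made up.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class InfoFamilyDescriptorSketch {
  public static void main(String[] args) {
    TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                 // VERSIONS => '3'
            .setInMemory(true)                                 // IN_MEMORY => 'true'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setBlocksize(8 * 1024)                            // BLOCKSIZE => '8192 B (8KB)'
            .build())
        .build();
    System.out.println(td);
  }
}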
2024-11-11T20:45:14,021 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731357914020Disabling compacts and flushes for region at 1731357914020Disabling writes for close at 1731357914021 (+1 ms)Writing region close event to WAL at 1731357914021Closed at 1731357914021 2024-11-11T20:45:14,023 WARN [master/51ca66f7ee3c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/MasterData/data/master/store/.initializing 2024-11-11T20:45:14,023 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/MasterData/WALs/51ca66f7ee3c,43673,1731357913813 2024-11-11T20:45:14,027 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=51ca66f7ee3c%2C43673%2C1731357913813, suffix=, logDir=hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/MasterData/WALs/51ca66f7ee3c,43673,1731357913813, archiveDir=hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/MasterData/oldWALs, maxLogs=10 2024-11-11T20:45:14,028 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 51ca66f7ee3c%2C43673%2C1731357913813.1731357914028 2024-11-11T20:45:14,037 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/MasterData/WALs/51ca66f7ee3c,43673,1731357913813/51ca66f7ee3c%2C43673%2C1731357913813.1731357914028 2024-11-11T20:45:14,053 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42837:42837),(127.0.0.1/127.0.0.1:34335:34335)] 2024-11-11T20:45:14,057 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-11T20:45:14,057 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T20:45:14,057 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:45:14,058 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:45:14,061 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:45:14,064 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-11T20:45:14,064 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:45:14,065 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:45:14,065 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:45:14,067 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-11T20:45:14,067 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:45:14,068 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T20:45:14,068 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:45:14,071 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-11T20:45:14,071 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:45:14,072 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T20:45:14,072 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:45:14,074 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-11T20:45:14,074 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:45:14,075 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T20:45:14,075 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:45:14,076 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:45:14,076 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:45:14,078 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:45:14,078 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:45:14,078 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-11T20:45:14,080 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:45:14,082 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T20:45:14,082 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=750349, jitterRate=-0.04588225483894348}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-11T20:45:14,083 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731357914058Initializing all the Stores at 1731357914059 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731357914059Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731357914061 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731357914061Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731357914061Cleaning up temporary data from old regions at 1731357914078 (+17 ms)Region opened successfully at 1731357914083 (+5 ms) 2024-11-11T20:45:14,084 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-11T20:45:14,088 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78c94276, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=51ca66f7ee3c/172.17.0.2:0 2024-11-11T20:45:14,089 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-11T20:45:14,089 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-11T20:45:14,089 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-11T20:45:14,089 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-11T20:45:14,090 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-11T20:45:14,091 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-11T20:45:14,091 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-11T20:45:14,093 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-11T20:45:14,094 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43673-0x10030888f300000, quorum=127.0.0.1:59235, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-11T20:45:14,095 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-11T20:45:14,095 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-11T20:45:14,096 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43673-0x10030888f300000, quorum=127.0.0.1:59235, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-11T20:45:14,097 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-11T20:45:14,097 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-11T20:45:14,098 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43673-0x10030888f300000, quorum=127.0.0.1:59235, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-11T20:45:14,099 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-11T20:45:14,100 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43673-0x10030888f300000, quorum=127.0.0.1:59235, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-11T20:45:14,101 DEBUG 
[master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-11T20:45:14,103 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43673-0x10030888f300000, quorum=127.0.0.1:59235, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-11T20:45:14,103 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-11T20:45:14,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39513-0x10030888f300001, quorum=127.0.0.1:59235, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T20:45:14,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43673-0x10030888f300000, quorum=127.0.0.1:59235, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T20:45:14,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43673-0x10030888f300000, quorum=127.0.0.1:59235, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:45:14,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39513-0x10030888f300001, quorum=127.0.0.1:59235, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:45:14,106 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=51ca66f7ee3c,43673,1731357913813, sessionid=0x10030888f300000, setting cluster-up flag (Was=false) 2024-11-11T20:45:14,107 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43673-0x10030888f300000, quorum=127.0.0.1:59235, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:45:14,107 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39513-0x10030888f300001, quorum=127.0.0.1:59235, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:45:14,111 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-11T20:45:14,112 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=51ca66f7ee3c,43673,1731357913813 2024-11-11T20:45:14,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39513-0x10030888f300001, quorum=127.0.0.1:59235, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:45:14,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43673-0x10030888f300000, quorum=127.0.0.1:59235, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:45:14,117 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-11T20:45:14,118 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=51ca66f7ee3c,43673,1731357913813 2024-11-11T20:45:14,120 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-11T20:45:14,121 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-11T20:45:14,122 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-11T20:45:14,122 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-11T20:45:14,122 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 51ca66f7ee3c,43673,1731357913813 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-11T20:45:14,124 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/51ca66f7ee3c:0, corePoolSize=5, maxPoolSize=5 2024-11-11T20:45:14,124 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/51ca66f7ee3c:0, corePoolSize=5, maxPoolSize=5 2024-11-11T20:45:14,124 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/51ca66f7ee3c:0, corePoolSize=5, maxPoolSize=5 2024-11-11T20:45:14,124 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/51ca66f7ee3c:0, corePoolSize=5, maxPoolSize=5 2024-11-11T20:45:14,124 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/51ca66f7ee3c:0, corePoolSize=10, maxPoolSize=10 2024-11-11T20:45:14,124 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:45:14,124 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/51ca66f7ee3c:0, corePoolSize=2, maxPoolSize=2 2024-11-11T20:45:14,124 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/51ca66f7ee3c:0, corePoolSize=1, 
maxPoolSize=1 2024-11-11T20:45:14,126 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731357944126 2024-11-11T20:45:14,126 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-11T20:45:14,126 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-11T20:45:14,126 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-11T20:45:14,126 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-11T20:45:14,126 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-11T20:45:14,126 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T20:45:14,126 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-11T20:45:14,126 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-11T20:45:14,126 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T20:45:14,127 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-11T20:45:14,127 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-11T20:45:14,127 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-11T20:45:14,127 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-11T20:45:14,127 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-11T20:45:14,128 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/51ca66f7ee3c:0:becomeActiveMaster-HFileCleaner.large.0-1731357914127,5,FailOnTimeoutGroup] 2024-11-11T20:45:14,128 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:45:14,128 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/51ca66f7ee3c:0:becomeActiveMaster-HFileCleaner.small.0-1731357914128,5,FailOnTimeoutGroup] 2024-11-11T20:45:14,128 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
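The log and HFile cleaner chores initialized above are pluggable. A hedged sketch using the standard plugin keys hbase.master.logcleaner.plugins and hbase.master.hfilecleaner.plugins (the keys are an assumption here, since the log does not show which keys this test sets); the cleaner class names are copied from the entries above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CleanerPluginsSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.master.logcleaner.plugins",
        "org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner,"
            + "org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner");
    conf.set("hbase.master.hfilecleaner.plugins",
        "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner,"
            + "org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner,"
            + "org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner");
    System.out.println(conf.get("hbase.master.hfilecleaner.plugins"));
  }
}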
2024-11-11T20:45:14,128 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-11T20:45:14,128 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-11T20:45:14,128 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-11T20:45:14,128 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
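Note: the FSTableDescriptors entry above prints the full hbase:meta schema, including per-family attributes such as BLOOMFILTER => 'ROWCOL', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', IN_MEMORY => 'true' and an 8 KB block size. Purely as an illustration of the public builder API (not something this test does itself), the sketch below constructs a column family with the same attributes; the table name "example" is invented.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorSketch {
        // Builds a table whose 'info' family mirrors the attributes printed for
        // hbase:meta above (ROWCOL bloom, ROW_INDEX_V1 encoding, in-memory,
        // 3 versions, 8 KB blocks).
        public static TableDescriptor example() {
            return TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                    .setBloomFilterType(BloomType.ROWCOL)
                    .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                    .setInMemory(true)
                    .setMaxVersions(3)
                    .setBlocksize(8192)
                    .build())
                .build();
        }
    }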
2024-11-11T20:45:14,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35321 is added to blk_1073741831_1007 (size=1321) 2024-11-11T20:45:14,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41057 is added to blk_1073741831_1007 (size=1321) 2024-11-11T20:45:14,139 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-11T20:45:14,139 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795 2024-11-11T20:45:14,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35321 is added to blk_1073741832_1008 (size=32) 2024-11-11T20:45:14,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41057 is added to blk_1073741832_1008 (size=32) 2024-11-11T20:45:14,147 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T20:45:14,148 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T20:45:14,150 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T20:45:14,151 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:45:14,151 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:45:14,151 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T20:45:14,153 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T20:45:14,153 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:45:14,154 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:45:14,154 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T20:45:14,156 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T20:45:14,156 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:45:14,157 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:45:14,157 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T20:45:14,159 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T20:45:14,159 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:45:14,160 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:45:14,160 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T20:45:14,161 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/data/hbase/meta/1588230740 2024-11-11T20:45:14,161 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/data/hbase/meta/1588230740 2024-11-11T20:45:14,162 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T20:45:14,162 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T20:45:14,163 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
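Note: the CompactionConfiguration lines above repeat the same tuning values for every column family of the meta region: minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, off-peak ratio 5.0, major period 604800000 ms, jitter 0.5. A hedged sketch of the configuration keys that normally drive these defaults follows; the key names are assumptions matching stock HBase and are not taken from this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
        public static Configuration compactionConf() {
            Configuration conf = HBaseConfiguration.create();
            conf.setInt("hbase.hstore.compaction.min", 3);                  // minFilesToCompact
            conf.setInt("hbase.hstore.compaction.max", 10);                 // maxFilesToCompact
            conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);           // ratio
            conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);   // off-peak ratio
            conf.setLong("hbase.hregion.majorcompaction", 604_800_000L);    // major period (7 days)
            conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);    // major jitter
            return conf;
        }
    }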
2024-11-11T20:45:14,164 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T20:45:14,167 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T20:45:14,167 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=751981, jitterRate=-0.04380704462528229}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T20:45:14,169 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731357914147Initializing all the Stores at 1731357914148 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731357914148Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731357914148Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731357914148Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731357914148Cleaning up temporary data from old regions at 1731357914163 (+15 ms)Region opened successfully at 1731357914169 (+6 ms) 2024-11-11T20:45:14,169 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T20:45:14,169 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T20:45:14,169 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T20:45:14,169 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T20:45:14,169 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T20:45:14,170 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T20:45:14,170 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731357914169Disabling compacts and flushes for region at 1731357914169Disabling writes for close at 1731357914169Writing region 
close event to WAL at 1731357914170 (+1 ms)Closed at 1731357914170 2024-11-11T20:45:14,172 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T20:45:14,172 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-11T20:45:14,172 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-11T20:45:14,174 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T20:45:14,176 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-11T20:45:14,199 INFO [RS:0;51ca66f7ee3c:39513 {}] regionserver.HRegionServer(746): ClusterId : 16c801ff-103e-44b2-949a-6d56d3c6509a 2024-11-11T20:45:14,199 DEBUG [RS:0;51ca66f7ee3c:39513 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-11T20:45:14,201 DEBUG [RS:0;51ca66f7ee3c:39513 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-11T20:45:14,201 DEBUG [RS:0;51ca66f7ee3c:39513 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-11T20:45:14,203 DEBUG [RS:0;51ca66f7ee3c:39513 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-11T20:45:14,203 DEBUG [RS:0;51ca66f7ee3c:39513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@342d5fc7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=51ca66f7ee3c/172.17.0.2:0 2024-11-11T20:45:14,216 DEBUG [RS:0;51ca66f7ee3c:39513 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;51ca66f7ee3c:39513 2024-11-11T20:45:14,216 INFO [RS:0;51ca66f7ee3c:39513 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-11T20:45:14,216 INFO [RS:0;51ca66f7ee3c:39513 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-11T20:45:14,216 DEBUG [RS:0;51ca66f7ee3c:39513 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-11T20:45:14,217 INFO [RS:0;51ca66f7ee3c:39513 {}] regionserver.HRegionServer(2659): reportForDuty to master=51ca66f7ee3c,43673,1731357913813 with port=39513, startcode=1731357913861 2024-11-11T20:45:14,218 DEBUG [RS:0;51ca66f7ee3c:39513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-11T20:45:14,220 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55205, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-11T20:45:14,221 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43673 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 51ca66f7ee3c,39513,1731357913861 2024-11-11T20:45:14,221 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43673 {}] master.ServerManager(517): Registering regionserver=51ca66f7ee3c,39513,1731357913861 2024-11-11T20:45:14,224 DEBUG [RS:0;51ca66f7ee3c:39513 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795 2024-11-11T20:45:14,224 DEBUG [RS:0;51ca66f7ee3c:39513 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39927 2024-11-11T20:45:14,224 DEBUG [RS:0;51ca66f7ee3c:39513 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-11T20:45:14,225 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43673-0x10030888f300000, quorum=127.0.0.1:59235, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T20:45:14,226 DEBUG [RS:0;51ca66f7ee3c:39513 {}] zookeeper.ZKUtil(111): regionserver:39513-0x10030888f300001, quorum=127.0.0.1:59235, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/51ca66f7ee3c,39513,1731357913861 2024-11-11T20:45:14,226 WARN [RS:0;51ca66f7ee3c:39513 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-11T20:45:14,226 INFO [RS:0;51ca66f7ee3c:39513 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T20:45:14,226 DEBUG [RS:0;51ca66f7ee3c:39513 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/WALs/51ca66f7ee3c,39513,1731357913861 2024-11-11T20:45:14,226 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [51ca66f7ee3c,39513,1731357913861] 2024-11-11T20:45:14,231 INFO [RS:0;51ca66f7ee3c:39513 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-11T20:45:14,234 INFO [RS:0;51ca66f7ee3c:39513 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-11T20:45:14,235 INFO [RS:0;51ca66f7ee3c:39513 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T20:45:14,235 INFO [RS:0;51ca66f7ee3c:39513 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
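Note: the MemStoreFlusher and PressureAwareCompactionThroughputController entries above report a global memstore limit of 880 M with a low-water mark of 836 M, and compaction throughput bounds of 100 MB/s and 50 MB/s. The sketch below lists the configuration keys that normally control those figures; the key names are assumptions based on stock HBase defaults, and the absolute memstore numbers also depend on the JVM heap size of the test process.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RegionServerPressureSketch {
        public static Configuration pressureConf() {
            Configuration conf = HBaseConfiguration.create();
            // Fraction of the region server heap shared by all memstores, and the
            // low-water mark at which flushing stops (log: 880 M / 836 M).
            conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
            conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
            // Compaction throughput bounds in bytes/second (log: 100 MB/s and 50 MB/s).
            conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
            conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
            return conf;
        }
    }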
2024-11-11T20:45:14,237 INFO [RS:0;51ca66f7ee3c:39513 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-11T20:45:14,238 INFO [RS:0;51ca66f7ee3c:39513 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-11T20:45:14,239 INFO [RS:0;51ca66f7ee3c:39513 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-11T20:45:14,239 DEBUG [RS:0;51ca66f7ee3c:39513 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:45:14,239 DEBUG [RS:0;51ca66f7ee3c:39513 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:45:14,239 DEBUG [RS:0;51ca66f7ee3c:39513 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:45:14,239 DEBUG [RS:0;51ca66f7ee3c:39513 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:45:14,240 DEBUG [RS:0;51ca66f7ee3c:39513 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:45:14,240 DEBUG [RS:0;51ca66f7ee3c:39513 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/51ca66f7ee3c:0, corePoolSize=2, maxPoolSize=2 2024-11-11T20:45:14,240 DEBUG [RS:0;51ca66f7ee3c:39513 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:45:14,240 DEBUG [RS:0;51ca66f7ee3c:39513 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:45:14,240 DEBUG [RS:0;51ca66f7ee3c:39513 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:45:14,240 DEBUG [RS:0;51ca66f7ee3c:39513 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:45:14,240 DEBUG [RS:0;51ca66f7ee3c:39513 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:45:14,240 DEBUG [RS:0;51ca66f7ee3c:39513 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:45:14,240 DEBUG [RS:0;51ca66f7ee3c:39513 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/51ca66f7ee3c:0, corePoolSize=3, maxPoolSize=3 2024-11-11T20:45:14,241 DEBUG [RS:0;51ca66f7ee3c:39513 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/51ca66f7ee3c:0, corePoolSize=3, maxPoolSize=3 2024-11-11T20:45:14,241 INFO [RS:0;51ca66f7ee3c:39513 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-11T20:45:14,242 INFO [RS:0;51ca66f7ee3c:39513 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T20:45:14,242 INFO [RS:0;51ca66f7ee3c:39513 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T20:45:14,242 INFO [RS:0;51ca66f7ee3c:39513 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-11T20:45:14,242 INFO [RS:0;51ca66f7ee3c:39513 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T20:45:14,242 INFO [RS:0;51ca66f7ee3c:39513 {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,39513,1731357913861-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T20:45:14,264 INFO [RS:0;51ca66f7ee3c:39513 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T20:45:14,265 INFO [RS:0;51ca66f7ee3c:39513 {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,39513,1731357913861-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T20:45:14,265 INFO [RS:0;51ca66f7ee3c:39513 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T20:45:14,265 INFO [RS:0;51ca66f7ee3c:39513 {}] regionserver.Replication(171): 51ca66f7ee3c,39513,1731357913861 started 2024-11-11T20:45:14,287 INFO [RS:0;51ca66f7ee3c:39513 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T20:45:14,287 INFO [RS:0;51ca66f7ee3c:39513 {}] regionserver.HRegionServer(1482): Serving as 51ca66f7ee3c,39513,1731357913861, RpcServer on 51ca66f7ee3c/172.17.0.2:39513, sessionid=0x10030888f300001 2024-11-11T20:45:14,287 DEBUG [RS:0;51ca66f7ee3c:39513 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T20:45:14,287 DEBUG [RS:0;51ca66f7ee3c:39513 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 51ca66f7ee3c,39513,1731357913861 2024-11-11T20:45:14,287 DEBUG [RS:0;51ca66f7ee3c:39513 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '51ca66f7ee3c,39513,1731357913861' 2024-11-11T20:45:14,287 DEBUG [RS:0;51ca66f7ee3c:39513 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T20:45:14,288 DEBUG [RS:0;51ca66f7ee3c:39513 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T20:45:14,289 DEBUG [RS:0;51ca66f7ee3c:39513 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T20:45:14,289 DEBUG [RS:0;51ca66f7ee3c:39513 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T20:45:14,289 DEBUG [RS:0;51ca66f7ee3c:39513 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 51ca66f7ee3c,39513,1731357913861 2024-11-11T20:45:14,289 DEBUG [RS:0;51ca66f7ee3c:39513 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '51ca66f7ee3c,39513,1731357913861' 2024-11-11T20:45:14,289 DEBUG [RS:0;51ca66f7ee3c:39513 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-11T20:45:14,289 DEBUG 
[RS:0;51ca66f7ee3c:39513 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T20:45:14,290 DEBUG [RS:0;51ca66f7ee3c:39513 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T20:45:14,290 INFO [RS:0;51ca66f7ee3c:39513 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-11T20:45:14,290 INFO [RS:0;51ca66f7ee3c:39513 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-11T20:45:14,326 WARN [51ca66f7ee3c:43673 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-11T20:45:14,393 INFO [RS:0;51ca66f7ee3c:39513 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=51ca66f7ee3c%2C39513%2C1731357913861, suffix=, logDir=hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/WALs/51ca66f7ee3c,39513,1731357913861, archiveDir=hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/oldWALs, maxLogs=32 2024-11-11T20:45:14,395 INFO [RS:0;51ca66f7ee3c:39513 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 51ca66f7ee3c%2C39513%2C1731357913861.1731357914395 2024-11-11T20:45:14,401 INFO [RS:0;51ca66f7ee3c:39513 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/WALs/51ca66f7ee3c,39513,1731357913861/51ca66f7ee3c%2C39513%2C1731357913861.1731357914395 2024-11-11T20:45:14,403 DEBUG [RS:0;51ca66f7ee3c:39513 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34335:34335),(127.0.0.1/127.0.0.1:42837:42837)] 2024-11-11T20:45:14,576 DEBUG [51ca66f7ee3c:43673 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-11T20:45:14,577 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=51ca66f7ee3c,39513,1731357913861 2024-11-11T20:45:14,579 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 51ca66f7ee3c,39513,1731357913861, state=OPENING 2024-11-11T20:45:14,580 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-11T20:45:14,582 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39513-0x10030888f300001, quorum=127.0.0.1:59235, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:45:14,582 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43673-0x10030888f300000, quorum=127.0.0.1:59235, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:45:14,582 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T20:45:14,582 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T20:45:14,582 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T20:45:14,582 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=51ca66f7ee3c,39513,1731357913861}] 2024-11-11T20:45:14,736 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-11T20:45:14,738 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54921, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-11T20:45:14,742 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-11T20:45:14,742 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T20:45:14,745 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=51ca66f7ee3c%2C39513%2C1731357913861.meta, suffix=.meta, logDir=hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/WALs/51ca66f7ee3c,39513,1731357913861, archiveDir=hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/oldWALs, maxLogs=32 2024-11-11T20:45:14,747 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 51ca66f7ee3c%2C39513%2C1731357913861.meta.1731357914747.meta 2024-11-11T20:45:14,754 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/WALs/51ca66f7ee3c,39513,1731357913861/51ca66f7ee3c%2C39513%2C1731357913861.meta.1731357914747.meta 2024-11-11T20:45:14,757 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42837:42837),(127.0.0.1/127.0.0.1:34335:34335)] 2024-11-11T20:45:14,759 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-11T20:45:14,759 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-11T20:45:14,759 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-11T20:45:14,759 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
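Note: the AbstractFSWAL entries above show the WAL configuration used for both the default and the meta WAL (blocksize=256 MB, rollsize=128 MB, maxLogs=32), which is the rolling behaviour this test exercises. A minimal sketch of the keys that usually produce those values follows; the key names are assumptions matching stock HBase, where roll size is derived as blocksize times the roll multiplier.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalRollingSketch {
        public static Configuration walConf() {
            Configuration conf = HBaseConfiguration.create();
            conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // blocksize = 256 MB
            conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);          // 256 MB * 0.5 = rollsize 128 MB
            conf.setInt("hbase.regionserver.maxlogs", 32);                         // maxLogs = 32
            return conf;
        }
    }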
2024-11-11T20:45:14,759 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-11T20:45:14,759 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T20:45:14,759 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-11T20:45:14,759 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-11T20:45:14,764 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T20:45:14,766 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T20:45:14,766 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:45:14,766 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:45:14,766 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T20:45:14,767 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T20:45:14,767 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:45:14,768 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:45:14,768 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T20:45:14,769 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T20:45:14,770 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:45:14,770 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:45:14,770 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T20:45:14,771 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T20:45:14,771 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:45:14,772 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-11T20:45:14,772 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T20:45:14,773 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/data/hbase/meta/1588230740 2024-11-11T20:45:14,774 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/data/hbase/meta/1588230740 2024-11-11T20:45:14,776 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T20:45:14,776 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T20:45:14,776 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-11T20:45:14,778 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T20:45:14,779 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=841974, jitterRate=0.07062600553035736}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T20:45:14,779 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-11T20:45:14,780 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731357914760Writing region info on filesystem at 1731357914760Initializing all the Stores at 1731357914761 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731357914761Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731357914764 (+3 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731357914764Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731357914764Cleaning up temporary data from old regions at 1731357914776 (+12 ms)Running coprocessor post-open hooks at 1731357914779 (+3 ms)Region opened successfully at 1731357914780 (+1 ms) 2024-11-11T20:45:14,781 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731357914736 2024-11-11T20:45:14,785 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-11T20:45:14,785 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-11T20:45:14,786 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=51ca66f7ee3c,39513,1731357913861 2024-11-11T20:45:14,787 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 51ca66f7ee3c,39513,1731357913861, state=OPEN 2024-11-11T20:45:14,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39513-0x10030888f300001, quorum=127.0.0.1:59235, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T20:45:14,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43673-0x10030888f300000, quorum=127.0.0.1:59235, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T20:45:14,789 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=51ca66f7ee3c,39513,1731357913861 2024-11-11T20:45:14,789 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T20:45:14,789 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T20:45:14,792 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-11T20:45:14,792 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=51ca66f7ee3c,39513,1731357913861 in 207 msec 2024-11-11T20:45:14,795 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-11T20:45:14,795 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 620 msec 2024-11-11T20:45:14,796 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T20:45:14,796 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-11T20:45:14,798 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T20:45:14,798 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=51ca66f7ee3c,39513,1731357913861, seqNum=-1] 2024-11-11T20:45:14,798 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T20:45:14,799 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48581, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T20:45:14,806 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 684 msec 2024-11-11T20:45:14,806 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731357914806, completionTime=-1 2024-11-11T20:45:14,806 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-11T20:45:14,806 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-11T20:45:14,808 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-11T20:45:14,808 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731357974808 2024-11-11T20:45:14,808 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731358034808 2024-11-11T20:45:14,809 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-11T20:45:14,809 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,43673,1731357913813-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T20:45:14,809 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,43673,1731357913813-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T20:45:14,809 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,43673,1731357913813-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T20:45:14,809 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-51ca66f7ee3c:43673, period=300000, unit=MILLISECONDS is enabled. 
2024-11-11T20:45:14,809 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-11T20:45:14,809 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-11T20:45:14,811 DEBUG [master/51ca66f7ee3c:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-11T20:45:14,816 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.892sec 2024-11-11T20:45:14,816 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-11T20:45:14,816 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-11T20:45:14,816 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-11T20:45:14,816 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-11T20:45:14,816 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-11T20:45:14,816 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,43673,1731357913813-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T20:45:14,816 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,43673,1731357913813-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-11T20:45:14,819 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-11T20:45:14,819 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-11T20:45:14,819 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,43673,1731357913813-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
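Note: the client-side entries that follow (ClusterIdFetcher, ConnectionRegistryService, the meta region location fetch) record an AsyncConnection being established against the cluster registry. A minimal sketch of the corresponding public client API is shown here as an illustration only; it assumes the standard ConnectionFactory/AsyncAdmin calls rather than anything specific to this test.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.AsyncConnection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClientConnectionSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Establishing the connection resolves the cluster id and the hbase:meta
            // location through the connection registry, as recorded in the log below.
            try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
                String clusterId = conn.getAdmin().getClusterMetrics().get().getClusterId();
                System.out.println("cluster id: " + clusterId);
            }
        }
    }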
2024-11-11T20:45:14,899 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@781d38e4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T20:45:14,899 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 51ca66f7ee3c,43673,-1 for getting cluster id 2024-11-11T20:45:14,899 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-11T20:45:14,902 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '16c801ff-103e-44b2-949a-6d56d3c6509a' 2024-11-11T20:45:14,902 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-11T20:45:14,903 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "16c801ff-103e-44b2-949a-6d56d3c6509a" 2024-11-11T20:45:14,903 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b0cd008, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T20:45:14,903 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [51ca66f7ee3c,43673,-1] 2024-11-11T20:45:14,903 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-11T20:45:14,905 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T20:45:14,906 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50984, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-11T20:45:14,908 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ab35378, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T20:45:14,908 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T20:45:14,910 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=51ca66f7ee3c,39513,1731357913861, seqNum=-1] 2024-11-11T20:45:14,910 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T20:45:14,913 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54410, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T20:45:14,917 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=51ca66f7ee3c,43673,1731357913813 2024-11-11T20:45:14,918 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:45:14,921 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-11T20:45:14,921 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-11T20:45:14,921 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-11T20:45:14,921 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T20:45:14,921 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T20:45:14,921 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T20:45:14,921 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-11T20:45:14,922 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-11T20:45:14,922 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=274994465, stopped=false 2024-11-11T20:45:14,922 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=51ca66f7ee3c,43673,1731357913813 2024-11-11T20:45:14,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43673-0x10030888f300000, quorum=127.0.0.1:59235, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T20:45:14,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39513-0x10030888f300001, quorum=127.0.0.1:59235, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T20:45:14,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39513-0x10030888f300001, quorum=127.0.0.1:59235, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:45:14,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43673-0x10030888f300000, quorum=127.0.0.1:59235, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:45:14,923 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T20:45:14,923 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
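The call stack logged above bottoms out in org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown, which tears the whole minicluster down through org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster. A minimal sketch of that teardown pattern, assuming JUnit 4 (as the org.junit.runners frames in the trace suggest); the class name ExampleMiniClusterTest and the static TEST_UTIL field are illustrative assumptions, only the shutdownMiniCluster call itself is taken from the stack trace:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;

    public class ExampleMiniClusterTest {
      // Assumed field name; in the logged run the utility is owned by the test base class.
      private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

      @After
      public void tearDown() throws Exception {
        // Closes the shared async connection and stops the master, region server,
        // mini DFS and mini ZooKeeper cluster, producing the shutdown entries seen here.
        TEST_UTIL.shutdownMiniCluster();
      }
    }

This single call is what drives everything from "Shutting down minicluster" through "Minicluster is down" in the entries that follow.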
2024-11-11T20:45:14,924 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T20:45:14,924 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T20:45:14,924 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39513-0x10030888f300001, quorum=127.0.0.1:59235, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T20:45:14,924 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:43673-0x10030888f300000, quorum=127.0.0.1:59235, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T20:45:14,924 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '51ca66f7ee3c,39513,1731357913861' ***** 2024-11-11T20:45:14,925 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-11T20:45:14,925 INFO [RS:0;51ca66f7ee3c:39513 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T20:45:14,925 INFO [RS:0;51ca66f7ee3c:39513 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-11T20:45:14,925 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-11T20:45:14,925 INFO [RS:0;51ca66f7ee3c:39513 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-11T20:45:14,925 INFO [RS:0;51ca66f7ee3c:39513 {}] regionserver.HRegionServer(959): stopping server 51ca66f7ee3c,39513,1731357913861 2024-11-11T20:45:14,925 INFO [RS:0;51ca66f7ee3c:39513 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T20:45:14,925 INFO [RS:0;51ca66f7ee3c:39513 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;51ca66f7ee3c:39513. 2024-11-11T20:45:14,925 DEBUG [RS:0;51ca66f7ee3c:39513 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T20:45:14,925 DEBUG [RS:0;51ca66f7ee3c:39513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T20:45:14,925 INFO [RS:0;51ca66f7ee3c:39513 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-11T20:45:14,925 INFO [RS:0;51ca66f7ee3c:39513 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-11T20:45:14,925 INFO [RS:0;51ca66f7ee3c:39513 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-11T20:45:14,926 INFO [RS:0;51ca66f7ee3c:39513 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-11T20:45:14,926 INFO [RS:0;51ca66f7ee3c:39513 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-11T20:45:14,926 DEBUG [RS:0;51ca66f7ee3c:39513 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-11T20:45:14,926 DEBUG [RS:0;51ca66f7ee3c:39513 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-11T20:45:14,926 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T20:45:14,926 INFO [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T20:45:14,926 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T20:45:14,926 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T20:45:14,926 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T20:45:14,926 INFO [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-11T20:45:14,948 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/data/hbase/meta/1588230740/.tmp/ns/662a51f856a24e06979e9f2dc9642958 is 43, key is default/ns:d/1731357914800/Put/seqid=0 2024-11-11T20:45:14,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35321 is added to blk_1073741835_1011 (size=5153) 2024-11-11T20:45:14,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41057 is added to blk_1073741835_1011 (size=5153) 2024-11-11T20:45:14,955 INFO [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/data/hbase/meta/1588230740/.tmp/ns/662a51f856a24e06979e9f2dc9642958 2024-11-11T20:45:14,964 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/data/hbase/meta/1588230740/.tmp/ns/662a51f856a24e06979e9f2dc9642958 as hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/data/hbase/meta/1588230740/ns/662a51f856a24e06979e9f2dc9642958 2024-11-11T20:45:14,972 INFO [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/data/hbase/meta/1588230740/ns/662a51f856a24e06979e9f2dc9642958, entries=2, sequenceid=6, filesize=5.0 K 2024-11-11T20:45:14,974 INFO [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 47ms, sequenceid=6, compaction requested=false 2024-11-11T20:45:14,974 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-11T20:45:14,980 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-11T20:45:14,981 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T20:45:14,981 INFO [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T20:45:14,981 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731357914926Running coprocessor pre-close hooks at 1731357914926Disabling compacts and flushes for region at 1731357914926Disabling writes for close at 1731357914926Obtaining lock to block concurrent updates at 1731357914926Preparing flush snapshotting stores in 1588230740 at 1731357914926Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731357914927 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731357914927Flushing 1588230740/ns: creating writer at 1731357914928 (+1 ms)Flushing 1588230740/ns: appending metadata at 1731357914948 (+20 ms)Flushing 1588230740/ns: closing flushed file at 1731357914948Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@66eb557a: reopening flushed file at 1731357914962 (+14 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 47ms, sequenceid=6, compaction requested=false at 1731357914974 (+12 ms)Writing region close event to WAL at 1731357914975 (+1 ms)Running coprocessor post-close hooks at 1731357914981 (+6 ms)Closed at 1731357914981 2024-11-11T20:45:14,981 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-11T20:45:15,126 INFO [RS:0;51ca66f7ee3c:39513 {}] regionserver.HRegionServer(976): stopping server 51ca66f7ee3c,39513,1731357913861; all regions closed. 
2024-11-11T20:45:15,127 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:15,127 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:15,127 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:15,127 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:15,127 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:15,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41057 is added to blk_1073741834_1010 (size=1152) 2024-11-11T20:45:15,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35321 is added to blk_1073741834_1010 (size=1152) 2024-11-11T20:45:15,132 DEBUG [RS:0;51ca66f7ee3c:39513 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/oldWALs 2024-11-11T20:45:15,133 INFO [RS:0;51ca66f7ee3c:39513 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 51ca66f7ee3c%2C39513%2C1731357913861.meta:.meta(num 1731357914747) 2024-11-11T20:45:15,133 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:15,133 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:15,133 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:15,133 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:15,134 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:15,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41057 is added to blk_1073741833_1009 (size=93) 2024-11-11T20:45:15,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35321 is added to blk_1073741833_1009 (size=93) 2024-11-11T20:45:15,242 INFO [regionserver/51ca66f7ee3c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-11T20:45:15,242 INFO [regionserver/51ca66f7ee3c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-11T20:45:15,539 DEBUG [RS:0;51ca66f7ee3c:39513 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/oldWALs 2024-11-11T20:45:15,539 INFO [RS:0;51ca66f7ee3c:39513 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 51ca66f7ee3c%2C39513%2C1731357913861:(num 1731357914395) 2024-11-11T20:45:15,539 DEBUG [RS:0;51ca66f7ee3c:39513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T20:45:15,539 INFO [RS:0;51ca66f7ee3c:39513 {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T20:45:15,539 INFO [RS:0;51ca66f7ee3c:39513 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T20:45:15,539 INFO [RS:0;51ca66f7ee3c:39513 {}] hbase.ChoreService(370): Chore service for: regionserver/51ca66f7ee3c:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-11T20:45:15,540 INFO [RS:0;51ca66f7ee3c:39513 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T20:45:15,540 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-11T20:45:15,540 INFO [RS:0;51ca66f7ee3c:39513 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39513 2024-11-11T20:45:15,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39513-0x10030888f300001, quorum=127.0.0.1:59235, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/51ca66f7ee3c,39513,1731357913861 2024-11-11T20:45:15,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43673-0x10030888f300000, quorum=127.0.0.1:59235, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T20:45:15,541 INFO [RS:0;51ca66f7ee3c:39513 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T20:45:15,542 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [51ca66f7ee3c,39513,1731357913861] 2024-11-11T20:45:15,543 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/51ca66f7ee3c,39513,1731357913861 already deleted, retry=false 2024-11-11T20:45:15,543 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 51ca66f7ee3c,39513,1731357913861 expired; onlineServers=0 2024-11-11T20:45:15,543 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '51ca66f7ee3c,43673,1731357913813' ***** 2024-11-11T20:45:15,543 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-11T20:45:15,543 INFO [M:0;51ca66f7ee3c:43673 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T20:45:15,543 INFO [M:0;51ca66f7ee3c:43673 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T20:45:15,543 DEBUG [M:0;51ca66f7ee3c:43673 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-11T20:45:15,544 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-11T20:45:15,544 DEBUG [M:0;51ca66f7ee3c:43673 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-11T20:45:15,544 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster-HFileCleaner.large.0-1731357914127 {}] cleaner.HFileCleaner(306): Exit Thread[master/51ca66f7ee3c:0:becomeActiveMaster-HFileCleaner.large.0-1731357914127,5,FailOnTimeoutGroup] 2024-11-11T20:45:15,544 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster-HFileCleaner.small.0-1731357914128 {}] cleaner.HFileCleaner(306): Exit Thread[master/51ca66f7ee3c:0:becomeActiveMaster-HFileCleaner.small.0-1731357914128,5,FailOnTimeoutGroup] 2024-11-11T20:45:15,544 INFO [M:0;51ca66f7ee3c:43673 {}] hbase.ChoreService(370): Chore service for: master/51ca66f7ee3c:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-11T20:45:15,544 INFO [M:0;51ca66f7ee3c:43673 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T20:45:15,544 DEBUG [M:0;51ca66f7ee3c:43673 {}] master.HMaster(1795): Stopping service threads 2024-11-11T20:45:15,544 INFO [M:0;51ca66f7ee3c:43673 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-11T20:45:15,544 INFO [M:0;51ca66f7ee3c:43673 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T20:45:15,544 INFO [M:0;51ca66f7ee3c:43673 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-11T20:45:15,544 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-11T20:45:15,545 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43673-0x10030888f300000, quorum=127.0.0.1:59235, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-11T20:45:15,545 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43673-0x10030888f300000, quorum=127.0.0.1:59235, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:45:15,545 DEBUG [M:0;51ca66f7ee3c:43673 {}] zookeeper.ZKUtil(347): master:43673-0x10030888f300000, quorum=127.0.0.1:59235, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-11T20:45:15,545 WARN [M:0;51ca66f7ee3c:43673 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-11T20:45:15,545 INFO [M:0;51ca66f7ee3c:43673 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/.lastflushedseqids 2024-11-11T20:45:15,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35321 is added to blk_1073741836_1012 (size=99) 2024-11-11T20:45:15,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41057 is added to blk_1073741836_1012 (size=99) 2024-11-11T20:45:15,555 INFO [M:0;51ca66f7ee3c:43673 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-11T20:45:15,556 INFO [M:0;51ca66f7ee3c:43673 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-11T20:45:15,556 DEBUG [M:0;51ca66f7ee3c:43673 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T20:45:15,556 INFO [M:0;51ca66f7ee3c:43673 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T20:45:15,556 DEBUG [M:0;51ca66f7ee3c:43673 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T20:45:15,556 DEBUG [M:0;51ca66f7ee3c:43673 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T20:45:15,556 DEBUG [M:0;51ca66f7ee3c:43673 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T20:45:15,556 INFO [M:0;51ca66f7ee3c:43673 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-11T20:45:15,573 DEBUG [M:0;51ca66f7ee3c:43673 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cbc1d92f767e4925b0d23c8d8bb180e7 is 82, key is hbase:meta,,1/info:regioninfo/1731357914786/Put/seqid=0 2024-11-11T20:45:15,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35321 is added to blk_1073741837_1013 (size=5672) 2024-11-11T20:45:15,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41057 is added to blk_1073741837_1013 (size=5672) 2024-11-11T20:45:15,579 INFO [M:0;51ca66f7ee3c:43673 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cbc1d92f767e4925b0d23c8d8bb180e7 2024-11-11T20:45:15,600 DEBUG [M:0;51ca66f7ee3c:43673 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ccc2717dc3d243cb80599ce63cfb0a6c is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731357914805/Put/seqid=0 2024-11-11T20:45:15,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41057 is added to blk_1073741838_1014 (size=5275) 2024-11-11T20:45:15,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35321 is added to blk_1073741838_1014 (size=5275) 2024-11-11T20:45:15,606 INFO [M:0;51ca66f7ee3c:43673 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ccc2717dc3d243cb80599ce63cfb0a6c 2024-11-11T20:45:15,626 DEBUG [M:0;51ca66f7ee3c:43673 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/dbe7a631fde94be4b1e644da7d0d4944 is 69, key is 51ca66f7ee3c,39513,1731357913861/rs:state/1731357914222/Put/seqid=0 2024-11-11T20:45:15,630 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41057 is added to blk_1073741839_1015 (size=5156) 2024-11-11T20:45:15,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35321 is added to blk_1073741839_1015 (size=5156) 2024-11-11T20:45:15,631 INFO [M:0;51ca66f7ee3c:43673 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/dbe7a631fde94be4b1e644da7d0d4944 2024-11-11T20:45:15,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39513-0x10030888f300001, quorum=127.0.0.1:59235, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T20:45:15,642 INFO [RS:0;51ca66f7ee3c:39513 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T20:45:15,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39513-0x10030888f300001, quorum=127.0.0.1:59235, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T20:45:15,642 INFO [RS:0;51ca66f7ee3c:39513 {}] regionserver.HRegionServer(1031): Exiting; stopping=51ca66f7ee3c,39513,1731357913861; zookeeper connection closed. 2024-11-11T20:45:15,643 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1a81415b {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1a81415b 2024-11-11T20:45:15,643 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-11T20:45:15,654 DEBUG [M:0;51ca66f7ee3c:43673 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/90b37d147a6b4d17a215aff96ce5472b is 52, key is load_balancer_on/state:d/1731357914919/Put/seqid=0 2024-11-11T20:45:15,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41057 is added to blk_1073741840_1016 (size=5056) 2024-11-11T20:45:15,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35321 is added to blk_1073741840_1016 (size=5056) 2024-11-11T20:45:15,661 INFO [M:0;51ca66f7ee3c:43673 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/90b37d147a6b4d17a215aff96ce5472b 2024-11-11T20:45:15,668 DEBUG [M:0;51ca66f7ee3c:43673 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cbc1d92f767e4925b0d23c8d8bb180e7 as hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/cbc1d92f767e4925b0d23c8d8bb180e7 2024-11-11T20:45:15,674 INFO [M:0;51ca66f7ee3c:43673 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/cbc1d92f767e4925b0d23c8d8bb180e7, entries=8, sequenceid=29, filesize=5.5 K 2024-11-11T20:45:15,675 DEBUG [M:0;51ca66f7ee3c:43673 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ccc2717dc3d243cb80599ce63cfb0a6c as hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ccc2717dc3d243cb80599ce63cfb0a6c 2024-11-11T20:45:15,680 INFO [M:0;51ca66f7ee3c:43673 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ccc2717dc3d243cb80599ce63cfb0a6c, entries=3, sequenceid=29, filesize=5.2 K 2024-11-11T20:45:15,682 DEBUG [M:0;51ca66f7ee3c:43673 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/dbe7a631fde94be4b1e644da7d0d4944 as hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/dbe7a631fde94be4b1e644da7d0d4944 2024-11-11T20:45:15,688 INFO [M:0;51ca66f7ee3c:43673 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/dbe7a631fde94be4b1e644da7d0d4944, entries=1, sequenceid=29, filesize=5.0 K 2024-11-11T20:45:15,689 DEBUG [M:0;51ca66f7ee3c:43673 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/90b37d147a6b4d17a215aff96ce5472b as hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/90b37d147a6b4d17a215aff96ce5472b 2024-11-11T20:45:15,696 INFO [M:0;51ca66f7ee3c:43673 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39927/user/jenkins/test-data/7bbbd67e-325d-df80-ef14-f2ab76902795/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/90b37d147a6b4d17a215aff96ce5472b, entries=1, sequenceid=29, filesize=4.9 K 2024-11-11T20:45:15,697 INFO [M:0;51ca66f7ee3c:43673 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 141ms, sequenceid=29, compaction requested=false 2024-11-11T20:45:15,699 INFO [M:0;51ca66f7ee3c:43673 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-11T20:45:15,699 DEBUG [M:0;51ca66f7ee3c:43673 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731357915556Disabling compacts and flushes for region at 1731357915556Disabling writes for close at 1731357915556Obtaining lock to block concurrent updates at 1731357915556Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731357915556Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731357915557 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731357915557Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731357915557Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731357915573 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731357915573Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731357915585 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731357915600 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731357915600Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731357915611 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731357915625 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731357915625Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731357915636 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731357915653 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731357915653Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2c0ab1b2: reopening flushed file at 1731357915666 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@228d3d69: reopening flushed file at 1731357915674 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@21c36273: reopening flushed file at 1731357915681 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@63c0999f: reopening flushed file at 1731357915688 (+7 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 141ms, sequenceid=29, compaction requested=false at 1731357915697 (+9 ms)Writing region close event to WAL at 1731357915699 (+2 ms)Closed at 1731357915699 2024-11-11T20:45:15,699 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:15,699 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:15,699 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:15,700 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:15,700 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:15,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41057 is added to blk_1073741830_1006 (size=10311) 2024-11-11T20:45:15,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35321 is added to blk_1073741830_1006 (size=10311) 2024-11-11T20:45:15,702 INFO [M:0;51ca66f7ee3c:43673 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-11T20:45:15,703 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-11T20:45:15,703 INFO [M:0;51ca66f7ee3c:43673 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43673 2024-11-11T20:45:15,703 INFO [M:0;51ca66f7ee3c:43673 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T20:45:15,805 INFO [M:0;51ca66f7ee3c:43673 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T20:45:15,805 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43673-0x10030888f300000, quorum=127.0.0.1:59235, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T20:45:15,805 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43673-0x10030888f300000, quorum=127.0.0.1:59235, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T20:45:15,812 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@788d8945{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T20:45:15,812 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3c30f553{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T20:45:15,813 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T20:45:15,813 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@463a48f5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T20:45:15,813 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4da0f787{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cd42516-19a5-4c20-b0a9-2c6aa50c92a6/hadoop.log.dir/,STOPPED} 2024-11-11T20:45:15,816 WARN [BP-403757501-172.17.0.2-1731357913200 heartbeating to localhost/127.0.0.1:39927 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T20:45:15,816 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T20:45:15,816 WARN [BP-403757501-172.17.0.2-1731357913200 heartbeating to localhost/127.0.0.1:39927 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-403757501-172.17.0.2-1731357913200 (Datanode Uuid fef661c1-7bfe-4486-bd37-b4e67335c2b1) service to localhost/127.0.0.1:39927 2024-11-11T20:45:15,816 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T20:45:15,817 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cd42516-19a5-4c20-b0a9-2c6aa50c92a6/cluster_b47fbee1-c06d-8a13-d3fd-697288e507a4/data/data3/current/BP-403757501-172.17.0.2-1731357913200 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T20:45:15,817 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cd42516-19a5-4c20-b0a9-2c6aa50c92a6/cluster_b47fbee1-c06d-8a13-d3fd-697288e507a4/data/data4/current/BP-403757501-172.17.0.2-1731357913200 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T20:45:15,818 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T20:45:15,821 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2f841e9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T20:45:15,821 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3270c9ae{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T20:45:15,821 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T20:45:15,821 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@55f7876e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T20:45:15,822 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@68b9cf2c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cd42516-19a5-4c20-b0a9-2c6aa50c92a6/hadoop.log.dir/,STOPPED} 2024-11-11T20:45:15,823 WARN [BP-403757501-172.17.0.2-1731357913200 heartbeating to localhost/127.0.0.1:39927 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T20:45:15,823 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
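A few entries below, HBaseTestingUtil(805) reports starting a fresh minicluster with StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1}. A minimal sketch of requesting that configuration from a test, assuming the builder-style StartMiniClusterOption API and a locally constructed HBaseTestingUtil (this is not the test's actual code):

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class ExampleMiniClusterStart {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)   // two datanodes, matching the logged restart
            .numZkServers(1)
            .build();
        // Brings up mini DFS, mini ZooKeeper and an in-process HBase master/region server.
        util.startMiniCluster(option);
        // ... exercise the cluster ...
        util.shutdownMiniCluster();
      }
    }
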
2024-11-11T20:45:15,823 WARN [BP-403757501-172.17.0.2-1731357913200 heartbeating to localhost/127.0.0.1:39927 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-403757501-172.17.0.2-1731357913200 (Datanode Uuid c7c349b1-0868-4a48-9c39-432e1e18b88d) service to localhost/127.0.0.1:39927 2024-11-11T20:45:15,823 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T20:45:15,824 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cd42516-19a5-4c20-b0a9-2c6aa50c92a6/cluster_b47fbee1-c06d-8a13-d3fd-697288e507a4/data/data1/current/BP-403757501-172.17.0.2-1731357913200 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T20:45:15,824 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cd42516-19a5-4c20-b0a9-2c6aa50c92a6/cluster_b47fbee1-c06d-8a13-d3fd-697288e507a4/data/data2/current/BP-403757501-172.17.0.2-1731357913200 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T20:45:15,824 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T20:45:15,830 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@14a3b236{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T20:45:15,831 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@630e1a46{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T20:45:15,831 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T20:45:15,831 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3bd9c5b4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T20:45:15,831 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@534816f2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cd42516-19a5-4c20-b0a9-2c6aa50c92a6/hadoop.log.dir/,STOPPED} 2024-11-11T20:45:15,836 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-11T20:45:15,856 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-11T20:45:15,856 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-11T20:45:15,856 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cd42516-19a5-4c20-b0a9-2c6aa50c92a6/hadoop.log.dir so I do NOT create it in target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75 2024-11-11T20:45:15,856 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cd42516-19a5-4c20-b0a9-2c6aa50c92a6/hadoop.tmp.dir so I do NOT create it in target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75 2024-11-11T20:45:15,856 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186, deleteOnExit=true 2024-11-11T20:45:15,856 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-11T20:45:15,856 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/test.cache.data in system properties and HBase conf 2024-11-11T20:45:15,856 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/hadoop.tmp.dir in system properties and HBase conf 2024-11-11T20:45:15,856 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/hadoop.log.dir in system properties and HBase conf 2024-11-11T20:45:15,857 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-11T20:45:15,857 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-11T20:45:15,857 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-11T20:45:15,857 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-11T20:45:15,857 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-11T20:45:15,857 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-11T20:45:15,857 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-11T20:45:15,858 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T20:45:15,858 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-11T20:45:15,858 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-11T20:45:15,858 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T20:45:15,858 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T20:45:15,858 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-11T20:45:15,858 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/nfs.dump.dir in system properties and HBase conf 2024-11-11T20:45:15,858 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/java.io.tmpdir in system properties and HBase conf 2024-11-11T20:45:15,859 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T20:45:15,859 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-11T20:45:15,859 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-11T20:45:15,870 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-11T20:45:15,927 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T20:45:15,937 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T20:45:15,940 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T20:45:15,940 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T20:45:15,940 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T20:45:15,941 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T20:45:15,942 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@15d1211f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/hadoop.log.dir/,AVAILABLE} 2024-11-11T20:45:15,942 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@16369da1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T20:45:16,052 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4705e615{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/java.io.tmpdir/jetty-localhost-35867-hadoop-hdfs-3_4_1-tests_jar-_-any-7327696426734291102/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T20:45:16,052 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5d8a9c69{HTTP/1.1, (http/1.1)}{localhost:35867} 2024-11-11T20:45:16,053 INFO [Time-limited test {}] server.Server(415): Started @105277ms 2024-11-11T20:45:16,069 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-11T20:45:16,132 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T20:45:16,136 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T20:45:16,137 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T20:45:16,137 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T20:45:16,137 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-11T20:45:16,139 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@49b91397{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/hadoop.log.dir/,AVAILABLE} 2024-11-11T20:45:16,139 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@35a03ba9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T20:45:16,242 INFO [regionserver/51ca66f7ee3c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T20:45:16,244 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@24befc55{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/java.io.tmpdir/jetty-localhost-36415-hadoop-hdfs-3_4_1-tests_jar-_-any-16693645258274179304/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T20:45:16,245 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@259c861e{HTTP/1.1, (http/1.1)}{localhost:36415} 2024-11-11T20:45:16,245 INFO [Time-limited test {}] server.Server(415): Started @105469ms 2024-11-11T20:45:16,246 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T20:45:16,275 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T20:45:16,280 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T20:45:16,284 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T20:45:16,284 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T20:45:16,284 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T20:45:16,285 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@75096fee{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/hadoop.log.dir/,AVAILABLE} 2024-11-11T20:45:16,285 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6d3d4ef0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T20:45:16,319 WARN [Thread-655 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data1/current/BP-797088966-172.17.0.2-1731357915882/current, will proceed with Du for space computation calculation, 2024-11-11T20:45:16,319 WARN [Thread-656 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data2/current/BP-797088966-172.17.0.2-1731357915882/current, will proceed with Du for space computation calculation, 2024-11-11T20:45:16,347 WARN [Thread-634 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T20:45:16,350 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa8d56be42e2df46f with lease ID 0xa8e7f64ec2ddd904: Processing first storage report for DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec from datanode DatanodeRegistration(127.0.0.1:33779, datanodeUuid=839acd95-50b4-4887-ada9-927bf6b5fa3c, infoPort=34011, infoSecurePort=0, ipcPort=37701, storageInfo=lv=-57;cid=testClusterID;nsid=502823967;c=1731357915882) 2024-11-11T20:45:16,350 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa8d56be42e2df46f with lease ID 0xa8e7f64ec2ddd904: from storage DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec node DatanodeRegistration(127.0.0.1:33779, datanodeUuid=839acd95-50b4-4887-ada9-927bf6b5fa3c, infoPort=34011, infoSecurePort=0, ipcPort=37701, storageInfo=lv=-57;cid=testClusterID;nsid=502823967;c=1731357915882), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T20:45:16,350 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa8d56be42e2df46f with lease ID 0xa8e7f64ec2ddd904: Processing first storage report for DS-569f2eb9-4fd4-4890-b8ce-2027d93c228e from datanode DatanodeRegistration(127.0.0.1:33779, datanodeUuid=839acd95-50b4-4887-ada9-927bf6b5fa3c, infoPort=34011, infoSecurePort=0, ipcPort=37701, storageInfo=lv=-57;cid=testClusterID;nsid=502823967;c=1731357915882) 2024-11-11T20:45:16,350 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa8d56be42e2df46f with lease ID 0xa8e7f64ec2ddd904: from storage DS-569f2eb9-4fd4-4890-b8ce-2027d93c228e node DatanodeRegistration(127.0.0.1:33779, datanodeUuid=839acd95-50b4-4887-ada9-927bf6b5fa3c, infoPort=34011, infoSecurePort=0, ipcPort=37701, storageInfo=lv=-57;cid=testClusterID;nsid=502823967;c=1731357915882), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T20:45:16,384 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@75de815b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/java.io.tmpdir/jetty-localhost-46261-hadoop-hdfs-3_4_1-tests_jar-_-any-2363116291151811425/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T20:45:16,385 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@fe8f7b0{HTTP/1.1, (http/1.1)}{localhost:46261} 2024-11-11T20:45:16,385 INFO [Time-limited test {}] server.Server(415): Started @105609ms 2024-11-11T20:45:16,386 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
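
The entries above trace HBaseTestingUtil pointing every Hadoop/HBase scratch directory at the per-test data dir and then bringing up a miniature HDFS: NameNode and DataNode web UIs on embedded Jetty, followed by the first block reports from the DataNodes. Below is a minimal sketch of how a test typically drives this startup; it assumes the HBaseTestingUtil API mirrors the long-standing HBaseTestingUtility methods (startMiniDFSCluster and friends) and is illustrative only, not the exact code behind this log.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsStartupSketch {
  public static void main(String[] args) throws Exception {
    // Assumed API: HBaseTestingUtil exposes the same mini-cluster helpers
    // as the older HBaseTestingUtility class.
    HBaseTestingUtil util = new HBaseTestingUtil();
    // Starting the mini DFS is what copies the per-test directories
    // (java.io.tmpdir, dfs.journalnode.edits.dir, ...) into system properties
    // and the HBase conf, as the "Setting ... in system properties and HBase
    // conf" lines above record.
    MiniDFSCluster dfs = util.startMiniDFSCluster(2); // two DataNodes -> the two block reports above
    try {
      System.out.println("mini HDFS at " + dfs.getFileSystem().getUri());
    } finally {
      util.shutdownMiniDFSCluster();
    }
  }
}
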
2024-11-11T20:45:16,450 WARN [Thread-681 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data3/current/BP-797088966-172.17.0.2-1731357915882/current, will proceed with Du for space computation calculation, 2024-11-11T20:45:16,450 WARN [Thread-682 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data4/current/BP-797088966-172.17.0.2-1731357915882/current, will proceed with Du for space computation calculation, 2024-11-11T20:45:16,468 WARN [Thread-670 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-11T20:45:16,471 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd39f9a1fbaf5f6aa with lease ID 0xa8e7f64ec2ddd905: Processing first storage report for DS-8a3d5d6e-fee5-49e6-b487-a147dc8006b7 from datanode DatanodeRegistration(127.0.0.1:39897, datanodeUuid=7e594afa-bcac-4748-944c-21b12f1087c4, infoPort=34609, infoSecurePort=0, ipcPort=36389, storageInfo=lv=-57;cid=testClusterID;nsid=502823967;c=1731357915882) 2024-11-11T20:45:16,472 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd39f9a1fbaf5f6aa with lease ID 0xa8e7f64ec2ddd905: from storage DS-8a3d5d6e-fee5-49e6-b487-a147dc8006b7 node DatanodeRegistration(127.0.0.1:39897, datanodeUuid=7e594afa-bcac-4748-944c-21b12f1087c4, infoPort=34609, infoSecurePort=0, ipcPort=36389, storageInfo=lv=-57;cid=testClusterID;nsid=502823967;c=1731357915882), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T20:45:16,472 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd39f9a1fbaf5f6aa with lease ID 0xa8e7f64ec2ddd905: Processing first storage report for DS-62aafb6d-a6de-47ab-852b-4940234b595e from datanode DatanodeRegistration(127.0.0.1:39897, datanodeUuid=7e594afa-bcac-4748-944c-21b12f1087c4, infoPort=34609, infoSecurePort=0, ipcPort=36389, storageInfo=lv=-57;cid=testClusterID;nsid=502823967;c=1731357915882) 2024-11-11T20:45:16,472 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd39f9a1fbaf5f6aa with lease ID 0xa8e7f64ec2ddd905: from storage DS-62aafb6d-a6de-47ab-852b-4940234b595e node DatanodeRegistration(127.0.0.1:39897, datanodeUuid=7e594afa-bcac-4748-944c-21b12f1087c4, infoPort=34609, infoSecurePort=0, ipcPort=36389, storageInfo=lv=-57;cid=testClusterID;nsid=502823967;c=1731357915882), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T20:45:16,512 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75 2024-11-11T20:45:16,515 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/zookeeper_0, clientPort=57310, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-11T20:45:16,516 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=57310 2024-11-11T20:45:16,516 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:45:16,518 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:45:16,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39897 is added to blk_1073741825_1001 (size=7) 2024-11-11T20:45:16,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33779 is added to blk_1073741825_1001 (size=7) 2024-11-11T20:45:16,530 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411 with version=8 2024-11-11T20:45:16,530 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/hbase-staging 2024-11-11T20:45:16,532 INFO [Time-limited test {}] client.ConnectionUtils(128): master/51ca66f7ee3c:0 server-side Connection retries=45 2024-11-11T20:45:16,532 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T20:45:16,532 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T20:45:16,532 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T20:45:16,532 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T20:45:16,532 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T20:45:16,532 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-11T20:45:16,532 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T20:45:16,533 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39527 2024-11-11T20:45:16,534 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39527 connecting to ZooKeeper ensemble=127.0.0.1:57310 2024-11-11T20:45:16,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:395270x0, quorum=127.0.0.1:57310, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T20:45:16,538 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39527-0x100308899d10000 connected 2024-11-11T20:45:16,551 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:45:16,553 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:45:16,556 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39527-0x100308899d10000, quorum=127.0.0.1:57310, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T20:45:16,556 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411, hbase.cluster.distributed=false 2024-11-11T20:45:16,558 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39527-0x100308899d10000, quorum=127.0.0.1:57310, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T20:45:16,561 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39527 2024-11-11T20:45:16,561 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39527 2024-11-11T20:45:16,561 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39527 2024-11-11T20:45:16,562 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39527 2024-11-11T20:45:16,562 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39527 2024-11-11T20:45:16,578 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/51ca66f7ee3c:0 server-side Connection retries=45 2024-11-11T20:45:16,578 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T20:45:16,578 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T20:45:16,579 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T20:45:16,579 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T20:45:16,579 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T20:45:16,579 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-11T20:45:16,579 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T20:45:16,580 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38547 2024-11-11T20:45:16,581 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38547 connecting to ZooKeeper ensemble=127.0.0.1:57310 2024-11-11T20:45:16,582 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:45:16,584 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:45:16,588 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:385470x0, quorum=127.0.0.1:57310, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T20:45:16,588 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38547-0x100308899d10001 connected 2024-11-11T20:45:16,589 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38547-0x100308899d10001, quorum=127.0.0.1:57310, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T20:45:16,589 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-11T20:45:16,590 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-11T20:45:16,591 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38547-0x100308899d10001, quorum=127.0.0.1:57310, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-11T20:45:16,592 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38547-0x100308899d10001, quorum=127.0.0.1:57310, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T20:45:16,593 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38547 2024-11-11T20:45:16,597 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38547 2024-11-11T20:45:16,597 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38547 2024-11-11T20:45:16,598 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38547 2024-11-11T20:45:16,599 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38547 
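
The RpcExecutor lines above show both the master (port 39527) and the regionserver (port 38547) being built with three handlers per FIFO executor, a single LinkedBlockingQueue call queue, and a read/write split on the priority executor. The following is a hedged sketch of the standard configuration keys that conventionally produce numbers like these; the key-to-log mapping is an assumption, and the values are simply the ones implied by this run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RpcExecutorConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed mapping to the log: handlerCount=3 per executor,
    // one shared call queue, and a read/write split for priority.RWQ.Fifo.
    conf.setInt("hbase.regionserver.handler.count", 3);
    conf.setFloat("hbase.ipc.server.callqueue.handler.factor", 0.0f); // -> numCallQueues=1
    conf.setFloat("hbase.ipc.server.callqueue.read.ratio", 0.5f);     // -> separate read/write handlers
    System.out.println("handlers = " + conf.getInt("hbase.regionserver.handler.count", -1));
  }
}
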
2024-11-11T20:45:16,617 DEBUG [M:0;51ca66f7ee3c:39527 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;51ca66f7ee3c:39527 2024-11-11T20:45:16,617 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/51ca66f7ee3c,39527,1731357916531 2024-11-11T20:45:16,619 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38547-0x100308899d10001, quorum=127.0.0.1:57310, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T20:45:16,619 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39527-0x100308899d10000, quorum=127.0.0.1:57310, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T20:45:16,619 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39527-0x100308899d10000, quorum=127.0.0.1:57310, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/51ca66f7ee3c,39527,1731357916531 2024-11-11T20:45:16,620 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39527-0x100308899d10000, quorum=127.0.0.1:57310, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:45:16,620 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38547-0x100308899d10001, quorum=127.0.0.1:57310, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-11T20:45:16,620 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38547-0x100308899d10001, quorum=127.0.0.1:57310, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:45:16,621 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39527-0x100308899d10000, quorum=127.0.0.1:57310, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-11T20:45:16,621 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/51ca66f7ee3c,39527,1731357916531 from backup master directory 2024-11-11T20:45:16,622 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39527-0x100308899d10000, quorum=127.0.0.1:57310, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/51ca66f7ee3c,39527,1731357916531 2024-11-11T20:45:16,622 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38547-0x100308899d10001, quorum=127.0.0.1:57310, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T20:45:16,622 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39527-0x100308899d10000, quorum=127.0.0.1:57310, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T20:45:16,622 WARN [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
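
Above, the master registers itself under /hbase/backup-masters and then deletes that znode as it becomes active, while the regionserver's ZKWatcher receives the NodeCreated/NodeDeleted events and keeps watches on znodes that do not exist yet (/hbase/master, /hbase/running). Here is a minimal sketch of that watch-before-create pattern using the plain ZooKeeper client rather than HBase's ZKWatcher/ZKUtil; the connection string and timeout are illustrative.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;

public class ZnodeWatchSketch {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:57310", 30000,
        (WatchedEvent e) -> System.out.println("event: " + e.getType() + " " + e.getPath()));
    // exists() registers a watch even when the node is absent; it returns null
    // now and the watcher fires once /hbase/master is created, which is the
    // "Set watcher on znode that does not yet exist" behaviour in the log.
    System.out.println("exists? " + zk.exists("/hbase/master", true));
    Thread.sleep(5000); // give any NodeCreated event a moment to arrive
    zk.close();
  }
}
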
2024-11-11T20:45:16,622 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=51ca66f7ee3c,39527,1731357916531 2024-11-11T20:45:16,629 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/hbase.id] with ID: bad0e12b-fe87-44f4-8c5e-8909d4f33354 2024-11-11T20:45:16,629 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/.tmp/hbase.id 2024-11-11T20:45:16,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39897 is added to blk_1073741826_1002 (size=42) 2024-11-11T20:45:16,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33779 is added to blk_1073741826_1002 (size=42) 2024-11-11T20:45:16,644 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/.tmp/hbase.id]:[hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/hbase.id] 2024-11-11T20:45:16,666 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:45:16,666 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-11T20:45:16,668 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
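
The FSUtils lines above describe the cluster ID bootstrap: the ID is written to a temporary file under .tmp and then moved to its final hbase.id location so readers never see a partially written file. A small sketch of that write-then-rename pattern with the Hadoop FileSystem API follows; the paths and the UUID payload are illustrative, not the exact format HBase persists.

import java.util.UUID;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdFileSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path rootDir = new Path("/user/jenkins/test-data/example-root"); // hypothetical root dir
    Path tmp = new Path(rootDir, ".tmp/hbase.id");
    Path dst = new Path(rootDir, "hbase.id");
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.writeUTF(UUID.randomUUID().toString()); // illustrative cluster ID payload
    }
    // Publish by rename: the target only ever appears fully written.
    if (!fs.rename(tmp, dst)) {
      throw new java.io.IOException("rename failed: " + tmp + " -> " + dst);
    }
  }
}
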
2024-11-11T20:45:16,670 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38547-0x100308899d10001, quorum=127.0.0.1:57310, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:45:16,670 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39527-0x100308899d10000, quorum=127.0.0.1:57310, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:45:16,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33779 is added to blk_1073741827_1003 (size=196) 2024-11-11T20:45:16,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39897 is added to blk_1073741827_1003 (size=196) 2024-11-11T20:45:16,700 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T20:45:16,701 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-11T20:45:16,702 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T20:45:16,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33779 is added to blk_1073741828_1004 (size=1189) 2024-11-11T20:45:16,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39897 is added to blk_1073741828_1004 (size=1189) 2024-11-11T20:45:16,733 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/MasterData/data/master/store 2024-11-11T20:45:16,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33779 is added to blk_1073741829_1005 (size=34) 2024-11-11T20:45:16,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39897 is added to blk_1073741829_1005 (size=34) 2024-11-11T20:45:16,752 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T20:45:16,752 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T20:45:16,752 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T20:45:16,752 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T20:45:16,752 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T20:45:16,752 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T20:45:16,752 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
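
The MasterRegion lines above create the local 'master:store' region with four column families (info, proc, rs, state), each with its own versions, block size, bloom filter and encoding, then open and close it once to write the close journal. That descriptor is built internally by the master, but the same attributes can be expressed with the public client builder API; the sketch below covers only the 'info' family and assumes the builder methods shown map one-to-one onto the attributes printed in the log.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static void main(String[] args) {
    TableDescriptor store = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                    // VERSIONS => '3'
            .setInMemory(true)                                    // IN_MEMORY => 'true'
            .setBlocksize(8 * 1024)                               // BLOCKSIZE => '8192 B (8KB)'
            .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
            .build())
        .build();
    System.out.println(store);
  }
}
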
2024-11-11T20:45:16,752 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731357916752Disabling compacts and flushes for region at 1731357916752Disabling writes for close at 1731357916752Writing region close event to WAL at 1731357916752Closed at 1731357916752 2024-11-11T20:45:16,753 WARN [master/51ca66f7ee3c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/MasterData/data/master/store/.initializing 2024-11-11T20:45:16,753 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/MasterData/WALs/51ca66f7ee3c,39527,1731357916531 2024-11-11T20:45:16,757 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=51ca66f7ee3c%2C39527%2C1731357916531, suffix=, logDir=hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/MasterData/WALs/51ca66f7ee3c,39527,1731357916531, archiveDir=hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/MasterData/oldWALs, maxLogs=10 2024-11-11T20:45:16,757 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 51ca66f7ee3c%2C39527%2C1731357916531.1731357916757 2024-11-11T20:45:16,764 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/MasterData/WALs/51ca66f7ee3c,39527,1731357916531/51ca66f7ee3c%2C39527%2C1731357916531.1731357916757 2024-11-11T20:45:16,769 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34011:34011),(127.0.0.1/127.0.0.1:34609:34609)] 2024-11-11T20:45:16,773 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-11T20:45:16,774 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T20:45:16,774 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:45:16,774 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:45:16,777 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:45:16,779 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-11T20:45:16,779 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:45:16,780 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:45:16,780 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:45:16,781 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-11T20:45:16,781 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:45:16,782 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T20:45:16,782 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:45:16,784 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-11T20:45:16,784 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:45:16,785 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T20:45:16,785 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:45:16,787 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-11T20:45:16,787 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:45:16,788 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T20:45:16,788 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:45:16,789 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:45:16,789 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:45:16,791 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:45:16,791 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:45:16,792 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-11T20:45:16,793 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:45:16,797 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T20:45:16,798 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=754381, jitterRate=-0.04075555503368378}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-11T20:45:16,799 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731357916774Initializing all the Stores at 1731357916775 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731357916775Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731357916777 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731357916777Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731357916777Cleaning up temporary data from old regions at 1731357916791 (+14 ms)Region opened successfully at 1731357916799 (+8 ms) 2024-11-11T20:45:16,801 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-11T20:45:16,806 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5187ed8b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=51ca66f7ee3c/172.17.0.2:0 2024-11-11T20:45:16,807 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-11T20:45:16,807 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-11T20:45:16,807 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-11T20:45:16,807 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-11T20:45:16,808 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-11T20:45:16,809 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-11T20:45:16,809 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-11T20:45:16,824 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-11T20:45:16,825 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39527-0x100308899d10000, quorum=127.0.0.1:57310, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-11T20:45:16,826 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-11T20:45:16,826 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-11T20:45:16,827 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39527-0x100308899d10000, quorum=127.0.0.1:57310, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-11T20:45:16,828 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-11T20:45:16,828 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-11T20:45:16,829 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39527-0x100308899d10000, quorum=127.0.0.1:57310, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-11T20:45:16,830 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-11T20:45:16,830 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T20:45:16,831 DEBUG [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-11T20:45:16,832 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-11T20:45:16,833 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39527-0x100308899d10000, quorum=127.0.0.1:57310, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-11T20:45:16,834 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-11T20:45:16,837 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39527-0x100308899d10000, quorum=127.0.0.1:57310, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-11T20:45:16,838 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-11T20:45:16,841 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38547-0x100308899d10001, quorum=127.0.0.1:57310, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T20:45:16,841 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39527-0x100308899d10000, quorum=127.0.0.1:57310, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T20:45:16,841 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38547-0x100308899d10001, quorum=127.0.0.1:57310, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:45:16,841 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39527-0x100308899d10000, quorum=127.0.0.1:57310, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:45:16,842 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=51ca66f7ee3c,39527,1731357916531, sessionid=0x100308899d10000, setting cluster-up flag (Was=false) 2024-11-11T20:45:16,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38547-0x100308899d10001, quorum=127.0.0.1:57310, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:45:16,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39527-0x100308899d10000, quorum=127.0.0.1:57310, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:45:16,850 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-11T20:45:16,852 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=51ca66f7ee3c,39527,1731357916531 2024-11-11T20:45:16,859 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38547-0x100308899d10001, quorum=127.0.0.1:57310, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:45:16,859 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39527-0x100308899d10000, quorum=127.0.0.1:57310, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:45:16,863 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-11T20:45:16,864 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=51ca66f7ee3c,39527,1731357916531 2024-11-11T20:45:16,865 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-11T20:45:16,871 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-11T20:45:16,871 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-11T20:45:16,871 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
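
The StochasticLoadBalancer line above reports the knobs it loaded (maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000) along with its list of cost functions. Below is a hedged sketch of the configuration keys conventionally behind those numbers; the exact key names are assumptions and should be checked against the HBase version in use.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed key names; values mirror the "Loaded config" line above.
    conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1_000_000);
    conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
    conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
    conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30_000L);
    System.out.println("balancer maxSteps = "
        + conf.getInt("hbase.master.balancer.stochastic.maxSteps", -1));
  }
}
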
2024-11-11T20:45:16,871 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 51ca66f7ee3c,39527,1731357916531 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-11T20:45:16,873 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/51ca66f7ee3c:0, corePoolSize=5, maxPoolSize=5 2024-11-11T20:45:16,873 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/51ca66f7ee3c:0, corePoolSize=5, maxPoolSize=5 2024-11-11T20:45:16,873 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/51ca66f7ee3c:0, corePoolSize=5, maxPoolSize=5 2024-11-11T20:45:16,873 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/51ca66f7ee3c:0, corePoolSize=5, maxPoolSize=5 2024-11-11T20:45:16,873 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/51ca66f7ee3c:0, corePoolSize=10, maxPoolSize=10 2024-11-11T20:45:16,873 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:45:16,873 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/51ca66f7ee3c:0, corePoolSize=2, maxPoolSize=2 2024-11-11T20:45:16,873 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:45:16,879 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T20:45:16,879 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-11T20:45:16,880 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:45:16,880 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 
'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-11T20:45:16,888 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731357946888 2024-11-11T20:45:16,888 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-11T20:45:16,888 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-11T20:45:16,889 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-11T20:45:16,889 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-11T20:45:16,889 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-11T20:45:16,889 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-11T20:45:16,893 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
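
The FSTableDescriptors entry above spells out the hbase:meta schema: families info, ns, rep_barrier and table, all with ROW_INDEX_V1 data-block encoding, ROWCOL bloom filters, in-memory block priority and (mostly) 8 KB blocks. A hedged sketch of expressing one such family with the public descriptor builders; the table name "example" is illustrative, not taken from the log:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeDescriptor {
      public static TableDescriptor build() {
        // Mirrors the attributes logged for the 'info' family of hbase:meta.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBlocksize(8 * 1024)
            .build();
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example"))
            .setColumnFamily(info)
            .build();
      }
    }
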
2024-11-11T20:45:16,898 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-11T20:45:16,899 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-11T20:45:16,899 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-11T20:45:16,901 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-11T20:45:16,901 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-11T20:45:16,908 INFO [RS:0;51ca66f7ee3c:38547 {}] regionserver.HRegionServer(746): ClusterId : bad0e12b-fe87-44f4-8c5e-8909d4f33354 2024-11-11T20:45:16,909 DEBUG [RS:0;51ca66f7ee3c:38547 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-11T20:45:16,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39897 is added to blk_1073741831_1007 (size=1321) 2024-11-11T20:45:16,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33779 is added to blk_1073741831_1007 (size=1321) 2024-11-11T20:45:16,910 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/51ca66f7ee3c:0:becomeActiveMaster-HFileCleaner.large.0-1731357916901,5,FailOnTimeoutGroup] 2024-11-11T20:45:16,910 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/51ca66f7ee3c:0:becomeActiveMaster-HFileCleaner.small.0-1731357916910,5,FailOnTimeoutGroup] 2024-11-11T20:45:16,910 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T20:45:16,910 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-11T20:45:16,910 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-11T20:45:16,910 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
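
Most INFO lines above register periodic chores (LogsCleaner, HFileCleaner, ReplicationBarrierCleaner, SnapshotCleaner) with the master's ChoreService at fixed periods. A rough sketch of that internal pattern, assuming the private-audience ScheduledChore/ChoreService API; the chore name, period and body are illustrative:

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) throws InterruptedException {
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ChoreService service = new ChoreService("example");
        // Runs chore() every 600000 ms, like the LogsCleaner registration above.
        ScheduledChore cleaner = new ScheduledChore("ExampleCleaner", stopper, 600000) {
          @Override protected void chore() {
            System.out.println("periodic cleanup pass");
          }
        };
        service.scheduleChore(cleaner);
        Thread.sleep(5_000);
        service.shutdown();
      }
    }
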
2024-11-11T20:45:16,912 DEBUG [RS:0;51ca66f7ee3c:38547 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-11T20:45:16,912 DEBUG [RS:0;51ca66f7ee3c:38547 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-11T20:45:16,914 DEBUG [RS:0;51ca66f7ee3c:38547 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-11T20:45:16,916 DEBUG [RS:0;51ca66f7ee3c:38547 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f3cfde0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=51ca66f7ee3c/172.17.0.2:0 2024-11-11T20:45:16,936 DEBUG [RS:0;51ca66f7ee3c:38547 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;51ca66f7ee3c:38547 2024-11-11T20:45:16,937 INFO [RS:0;51ca66f7ee3c:38547 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-11T20:45:16,937 INFO [RS:0;51ca66f7ee3c:38547 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-11T20:45:16,937 DEBUG [RS:0;51ca66f7ee3c:38547 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-11T20:45:16,938 INFO [RS:0;51ca66f7ee3c:38547 {}] regionserver.HRegionServer(2659): reportForDuty to master=51ca66f7ee3c,39527,1731357916531 with port=38547, startcode=1731357916578 2024-11-11T20:45:16,938 DEBUG [RS:0;51ca66f7ee3c:38547 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-11T20:45:16,940 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54923, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-11T20:45:16,941 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39527 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 51ca66f7ee3c,38547,1731357916578 2024-11-11T20:45:16,941 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39527 {}] master.ServerManager(517): Registering regionserver=51ca66f7ee3c,38547,1731357916578 2024-11-11T20:45:16,943 DEBUG [RS:0;51ca66f7ee3c:38547 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411 2024-11-11T20:45:16,943 DEBUG [RS:0;51ca66f7ee3c:38547 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46503 2024-11-11T20:45:16,943 DEBUG [RS:0;51ca66f7ee3c:38547 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-11T20:45:16,945 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39527-0x100308899d10000, quorum=127.0.0.1:57310, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T20:45:16,946 DEBUG [RS:0;51ca66f7ee3c:38547 {}] zookeeper.ZKUtil(111): regionserver:38547-0x100308899d10001, quorum=127.0.0.1:57310, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/51ca66f7ee3c,38547,1731357916578 2024-11-11T20:45:16,946 WARN [RS:0;51ca66f7ee3c:38547 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
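
The regionserver above wires up its internal RPC client (KeyValueCodec, tcpNoDelay, read/write timeouts) and registers with the master via reportForDuty; none of that is public API. From a client's point of view the equivalent step is simply opening a Connection against the same ZooKeeper quorum. A sketch, assuming the 127.0.0.1:57310 quorum printed in the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClientConnect {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "57310");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // ClusterMetrics carries the same clusterId the regionserver logs at startup.
          System.out.println("clusterId=" + admin.getClusterMetrics().getClusterId());
        }
      }
    }
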
2024-11-11T20:45:16,946 INFO [RS:0;51ca66f7ee3c:38547 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T20:45:16,946 DEBUG [RS:0;51ca66f7ee3c:38547 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578 2024-11-11T20:45:16,950 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [51ca66f7ee3c,38547,1731357916578] 2024-11-11T20:45:16,960 INFO [RS:0;51ca66f7ee3c:38547 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-11T20:45:16,964 INFO [RS:0;51ca66f7ee3c:38547 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-11T20:45:16,964 INFO [RS:0;51ca66f7ee3c:38547 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T20:45:16,964 INFO [RS:0;51ca66f7ee3c:38547 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T20:45:16,965 INFO [RS:0;51ca66f7ee3c:38547 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-11T20:45:16,968 INFO [RS:0;51ca66f7ee3c:38547 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-11T20:45:16,968 INFO [RS:0;51ca66f7ee3c:38547 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-11T20:45:16,968 DEBUG [RS:0;51ca66f7ee3c:38547 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:45:16,968 DEBUG [RS:0;51ca66f7ee3c:38547 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:45:16,968 DEBUG [RS:0;51ca66f7ee3c:38547 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:45:16,968 DEBUG [RS:0;51ca66f7ee3c:38547 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:45:16,968 DEBUG [RS:0;51ca66f7ee3c:38547 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:45:16,969 DEBUG [RS:0;51ca66f7ee3c:38547 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/51ca66f7ee3c:0, corePoolSize=2, maxPoolSize=2 2024-11-11T20:45:16,969 DEBUG [RS:0;51ca66f7ee3c:38547 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:45:16,969 DEBUG [RS:0;51ca66f7ee3c:38547 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:45:16,969 DEBUG [RS:0;51ca66f7ee3c:38547 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/51ca66f7ee3c:0, corePoolSize=1, 
maxPoolSize=1 2024-11-11T20:45:16,969 DEBUG [RS:0;51ca66f7ee3c:38547 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:45:16,969 DEBUG [RS:0;51ca66f7ee3c:38547 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:45:16,969 DEBUG [RS:0;51ca66f7ee3c:38547 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:45:16,969 DEBUG [RS:0;51ca66f7ee3c:38547 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/51ca66f7ee3c:0, corePoolSize=3, maxPoolSize=3 2024-11-11T20:45:16,969 DEBUG [RS:0;51ca66f7ee3c:38547 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/51ca66f7ee3c:0, corePoolSize=3, maxPoolSize=3 2024-11-11T20:45:16,971 INFO [RS:0;51ca66f7ee3c:38547 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T20:45:16,971 INFO [RS:0;51ca66f7ee3c:38547 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T20:45:16,971 INFO [RS:0;51ca66f7ee3c:38547 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T20:45:16,971 INFO [RS:0;51ca66f7ee3c:38547 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-11T20:45:16,971 INFO [RS:0;51ca66f7ee3c:38547 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T20:45:16,971 INFO [RS:0;51ca66f7ee3c:38547 {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,38547,1731357916578-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T20:45:16,994 INFO [RS:0;51ca66f7ee3c:38547 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T20:45:16,994 INFO [RS:0;51ca66f7ee3c:38547 {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,38547,1731357916578-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T20:45:16,994 INFO [RS:0;51ca66f7ee3c:38547 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T20:45:16,995 INFO [RS:0;51ca66f7ee3c:38547 {}] regionserver.Replication(171): 51ca66f7ee3c,38547,1731357916578 started 2024-11-11T20:45:17,017 INFO [RS:0;51ca66f7ee3c:38547 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
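
The WALFactory line above selects FSHLogProvider and the MemStoreFlusher reports globalMemStoreLimit=880 M with a low-water mark of 836 M, both derived from heap-fraction settings. A hedged sketch of the configuration keys behind those choices, set programmatically; the numeric values are the usual defaults (0.4 of heap, 0.95 of that limit), not values read from this test:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalAndMemstoreConfig {
      public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        // "filesystem" selects the classic FSHLog-based WAL provider logged above.
        conf.set("hbase.wal.provider", "filesystem");
        // Fraction of the regionserver heap shared by all memstores...
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        // ...and the low-water mark, as a fraction of that global limit.
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
        return conf;
      }
    }
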
2024-11-11T20:45:17,017 INFO [RS:0;51ca66f7ee3c:38547 {}] regionserver.HRegionServer(1482): Serving as 51ca66f7ee3c,38547,1731357916578, RpcServer on 51ca66f7ee3c/172.17.0.2:38547, sessionid=0x100308899d10001 2024-11-11T20:45:17,017 DEBUG [RS:0;51ca66f7ee3c:38547 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T20:45:17,017 DEBUG [RS:0;51ca66f7ee3c:38547 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 51ca66f7ee3c,38547,1731357916578 2024-11-11T20:45:17,017 DEBUG [RS:0;51ca66f7ee3c:38547 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '51ca66f7ee3c,38547,1731357916578' 2024-11-11T20:45:17,017 DEBUG [RS:0;51ca66f7ee3c:38547 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T20:45:17,018 DEBUG [RS:0;51ca66f7ee3c:38547 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T20:45:17,019 DEBUG [RS:0;51ca66f7ee3c:38547 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T20:45:17,019 DEBUG [RS:0;51ca66f7ee3c:38547 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T20:45:17,019 DEBUG [RS:0;51ca66f7ee3c:38547 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 51ca66f7ee3c,38547,1731357916578 2024-11-11T20:45:17,019 DEBUG [RS:0;51ca66f7ee3c:38547 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '51ca66f7ee3c,38547,1731357916578' 2024-11-11T20:45:17,019 DEBUG [RS:0;51ca66f7ee3c:38547 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-11T20:45:17,019 DEBUG [RS:0;51ca66f7ee3c:38547 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T20:45:17,020 DEBUG [RS:0;51ca66f7ee3c:38547 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T20:45:17,020 INFO [RS:0;51ca66f7ee3c:38547 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-11T20:45:17,020 INFO [RS:0;51ca66f7ee3c:38547 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-11T20:45:17,122 INFO [RS:0;51ca66f7ee3c:38547 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=51ca66f7ee3c%2C38547%2C1731357916578, suffix=, logDir=hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578, archiveDir=hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/oldWALs, maxLogs=32 2024-11-11T20:45:17,123 INFO [RS:0;51ca66f7ee3c:38547 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 51ca66f7ee3c%2C38547%2C1731357916578.1731357917123 2024-11-11T20:45:17,129 INFO [RS:0;51ca66f7ee3c:38547 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.1731357917123 2024-11-11T20:45:17,130 DEBUG [RS:0;51ca66f7ee3c:38547 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34609:34609),(127.0.0.1/127.0.0.1:34011:34011)] 2024-11-11T20:45:17,267 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:45:17,275 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:45:17,311 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-11T20:45:17,311 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411 2024-11-11T20:45:17,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39897 is added to blk_1073741833_1009 (size=32) 2024-11-11T20:45:17,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33779 is added to blk_1073741833_1009 (size=32) 2024-11-11T20:45:17,319 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T20:45:17,320 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T20:45:17,322 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 
604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T20:45:17,322 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:45:17,322 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:45:17,323 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T20:45:17,324 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T20:45:17,324 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:45:17,325 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:45:17,325 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T20:45:17,327 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 
2024-11-11T20:45:17,328 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:45:17,328 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:45:17,329 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T20:45:17,331 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T20:45:17,331 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:45:17,332 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:45:17,332 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T20:45:17,333 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/hbase/meta/1588230740 2024-11-11T20:45:17,334 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/hbase/meta/1588230740 2024-11-11T20:45:17,335 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T20:45:17,336 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T20:45:17,336 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
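
The CompactionConfiguration block repeated for every family above (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0, major period 604800000 ms, jitter 0.5) and the FlushLargeStoresPolicy note about hbase.hregion.percolumnfamilyflush.size.lower.bound all come from ordinary site configuration. A hedged sketch of the corresponding keys, using the values printed in the log (the 16 MB lower bound mirrors the fallback the policy reports):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
      public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);        // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);       // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        conf.setLong("hbase.hregion.majorcompaction", 604800000L);   // 7 days
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
        // Per-column-family flush threshold the policy above reports as unset.
        conf.setLong("hbase.hregion.percolumnfamilyflush.size.lower.bound", 16L * 1024 * 1024);
        return conf;
      }
    }
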
2024-11-11T20:45:17,338 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T20:45:17,340 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T20:45:17,341 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=735366, jitterRate=-0.06493382155895233}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T20:45:17,341 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731357917319Initializing all the Stores at 1731357917320 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731357917320Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731357917320Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731357917320Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731357917320Cleaning up temporary data from old regions at 1731357917336 (+16 ms)Region opened successfully at 1731357917341 (+5 ms) 2024-11-11T20:45:17,341 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T20:45:17,341 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T20:45:17,342 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T20:45:17,342 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T20:45:17,342 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T20:45:17,342 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T20:45:17,342 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731357917341Disabling compacts and flushes for region at 1731357917341Disabling writes for close at 1731357917342 (+1 ms)Writing 
region close event to WAL at 1731357917342Closed at 1731357917342 2024-11-11T20:45:17,344 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T20:45:17,344 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-11T20:45:17,344 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-11T20:45:17,345 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T20:45:17,346 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-11T20:45:17,496 DEBUG [51ca66f7ee3c:39527 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-11T20:45:17,497 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=51ca66f7ee3c,38547,1731357916578 2024-11-11T20:45:17,499 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 51ca66f7ee3c,38547,1731357916578, state=OPENING 2024-11-11T20:45:17,500 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-11T20:45:17,501 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39527-0x100308899d10000, quorum=127.0.0.1:57310, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:45:17,501 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38547-0x100308899d10001, quorum=127.0.0.1:57310, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:45:17,502 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T20:45:17,502 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T20:45:17,502 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T20:45:17,502 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=51ca66f7ee3c,38547,1731357916578}] 2024-11-11T20:45:17,657 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-11T20:45:17,662 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48999, 
version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-11T20:45:17,672 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-11T20:45:17,673 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T20:45:17,675 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=51ca66f7ee3c%2C38547%2C1731357916578.meta, suffix=.meta, logDir=hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578, archiveDir=hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/oldWALs, maxLogs=32 2024-11-11T20:45:17,677 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta 2024-11-11T20:45:17,682 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta 2024-11-11T20:45:17,685 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34011:34011),(127.0.0.1/127.0.0.1:34609:34609)] 2024-11-11T20:45:17,693 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-11T20:45:17,693 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-11T20:45:17,693 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-11T20:45:17,694 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
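
Once the OpenRegionProcedure above completes, hbase:meta is hosted on 51ca66f7ee3c,38547 and its location is published under /hbase/meta-region-server; later entries show clients fetching it ("The fetched meta region location is ..."). A sketch of the equivalent lookup through the public client API, reusing the quorum settings assumed earlier:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class MetaLocationLookup {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "57310");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
          HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
          // Expected to print hbase:meta,,1.1588230740 and the hosting server name.
          System.out.println(loc.getRegion().getRegionNameAsString() + " on " + loc.getServerName());
        }
      }
    }
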
2024-11-11T20:45:17,694 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-11T20:45:17,694 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T20:45:17,694 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-11T20:45:17,694 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-11T20:45:17,696 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T20:45:17,697 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T20:45:17,697 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:45:17,697 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:45:17,698 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T20:45:17,699 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T20:45:17,699 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:45:17,699 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:45:17,699 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T20:45:17,700 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T20:45:17,700 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:45:17,701 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:45:17,701 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T20:45:17,702 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T20:45:17,702 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:45:17,703 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
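
Each "Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, ... prefetchOnOpen=false" line above reflects per-family block-cache flags combined with cluster defaults. A hedged sketch of the matching ColumnFamilyDescriptorBuilder switches (family name "info" reused purely for illustration):

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CacheConfigSketch {
      public static ColumnFamilyDescriptor build() {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setBlockCacheEnabled(true)        // cacheDataOnRead
            .setCacheDataOnWrite(false)
            .setCacheIndexesOnWrite(false)
            .setCacheBloomsOnWrite(false)
            .setEvictBlocksOnClose(false)
            .setPrefetchBlocksOnOpen(false)
            .build();
      }
    }
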
2024-11-11T20:45:17,703 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T20:45:17,704 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/hbase/meta/1588230740 2024-11-11T20:45:17,705 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/hbase/meta/1588230740 2024-11-11T20:45:17,707 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T20:45:17,707 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T20:45:17,707 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-11T20:45:17,709 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T20:45:17,710 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=746986, jitterRate=-0.050158217549324036}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T20:45:17,710 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-11T20:45:17,711 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731357917694Writing region info on filesystem at 1731357917694Initializing all the Stores at 1731357917695 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731357917695Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731357917695Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731357917695Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731357917695Cleaning up temporary data from old regions at 1731357917707 (+12 ms)Running coprocessor post-open hooks at 1731357917710 (+3 ms)Region opened successfully at 1731357917711 (+1 ms) 2024-11-11T20:45:17,712 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731357917656 2024-11-11T20:45:17,715 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-11T20:45:17,715 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-11T20:45:17,716 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=51ca66f7ee3c,38547,1731357916578 2024-11-11T20:45:17,717 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 51ca66f7ee3c,38547,1731357916578, state=OPEN 2024-11-11T20:45:17,719 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38547-0x100308899d10001, quorum=127.0.0.1:57310, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T20:45:17,719 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39527-0x100308899d10000, quorum=127.0.0.1:57310, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T20:45:17,719 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=51ca66f7ee3c,38547,1731357916578 2024-11-11T20:45:17,719 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T20:45:17,719 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T20:45:17,723 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-11T20:45:17,723 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=51ca66f7ee3c,38547,1731357916578 in 217 msec 2024-11-11T20:45:17,726 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-11T20:45:17,726 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 379 msec 2024-11-11T20:45:17,727 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T20:45:17,727 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-11T20:45:17,729 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T20:45:17,729 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=51ca66f7ee3c,38547,1731357916578, seqNum=-1] 2024-11-11T20:45:17,729 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T20:45:17,730 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56081, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T20:45:17,737 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 866 msec 2024-11-11T20:45:17,737 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731357917737, completionTime=-1 2024-11-11T20:45:17,738 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-11T20:45:17,738 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-11T20:45:17,740 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-11T20:45:17,740 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731357977740 2024-11-11T20:45:17,740 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731358037740 2024-11-11T20:45:17,740 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-11T20:45:17,741 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,39527,1731357916531-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T20:45:17,741 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,39527,1731357916531-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T20:45:17,741 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,39527,1731357916531-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T20:45:17,741 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-51ca66f7ee3c:39527, period=300000, unit=MILLISECONDS is enabled. 
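
InitMetaProcedure's final step above creates the 'default' and 'hbase' namespaces, after which the master reports initialization complete. User namespaces are created the same way through Admin; a sketch, with the namespace name "example_ns" purely illustrative:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class NamespaceSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.createNamespace(NamespaceDescriptor.create("example_ns").build());
          for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
            System.out.println(ns.getName());   // expect: default, hbase, example_ns
          }
        }
      }
    }
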
2024-11-11T20:45:17,741 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-11T20:45:17,741 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-11T20:45:17,743 DEBUG [master/51ca66f7ee3c:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-11T20:45:17,746 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.123sec 2024-11-11T20:45:17,746 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-11T20:45:17,746 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-11T20:45:17,746 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-11T20:45:17,746 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-11T20:45:17,746 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-11T20:45:17,746 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,39527,1731357916531-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T20:45:17,746 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,39527,1731357916531-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-11T20:45:17,748 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-11T20:45:17,749 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-11T20:45:17,749 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,39527,1731357916531-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T20:45:17,786 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-11T20:45:17,788 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:45:17,817 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@565a7a6f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T20:45:17,817 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 51ca66f7ee3c,39527,-1 for getting cluster id 2024-11-11T20:45:17,817 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-11T20:45:17,819 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'bad0e12b-fe87-44f4-8c5e-8909d4f33354' 2024-11-11T20:45:17,819 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-11T20:45:17,819 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "bad0e12b-fe87-44f4-8c5e-8909d4f33354" 2024-11-11T20:45:17,820 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7538bf8c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T20:45:17,820 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [51ca66f7ee3c,39527,-1] 2024-11-11T20:45:17,820 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-11T20:45:17,821 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T20:45:17,823 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37666, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-11T20:45:17,824 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5157b857, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T20:45:17,824 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T20:45:17,826 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=51ca66f7ee3c,38547,1731357916578, seqNum=-1] 2024-11-11T20:45:17,826 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T20:45:17,828 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37226, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T20:45:17,830 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:45:17,830 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=51ca66f7ee3c,39527,1731357916531 2024-11-11T20:45:17,831 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:45:17,832 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:45:17,833 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:45:17,834 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-11T20:45:17,855 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/51ca66f7ee3c:0 server-side Connection retries=45 2024-11-11T20:45:17,855 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T20:45:17,855 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T20:45:17,855 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T20:45:17,855 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T20:45:17,855 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T20:45:17,856 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-11T20:45:17,856 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T20:45:17,857 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46139 2024-11-11T20:45:17,858 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46139 connecting to ZooKeeper ensemble=127.0.0.1:57310 2024-11-11T20:45:17,859 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:45:17,862 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:45:17,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:461390x0, quorum=127.0.0.1:57310, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, 
path=null 2024-11-11T20:45:17,866 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-11T20:45:17,866 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:46139-0x100308899d10002, quorum=127.0.0.1:57310, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-11T20:45:17,866 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46139-0x100308899d10002 connected 2024-11-11T20:45:17,867 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-11T20:45:17,869 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-11T20:45:17,869 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:46139-0x100308899d10002, quorum=127.0.0.1:57310, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-11T20:45:17,872 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46139-0x100308899d10002, quorum=127.0.0.1:57310, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T20:45:17,872 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46139 2024-11-11T20:45:17,875 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46139 2024-11-11T20:45:17,875 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46139 2024-11-11T20:45:17,878 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46139 2024-11-11T20:45:17,878 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46139 2024-11-11T20:45:17,880 INFO [RS:1;51ca66f7ee3c:46139 {}] regionserver.HRegionServer(746): ClusterId : bad0e12b-fe87-44f4-8c5e-8909d4f33354 2024-11-11T20:45:17,880 DEBUG [RS:1;51ca66f7ee3c:46139 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-11T20:45:17,882 DEBUG [RS:1;51ca66f7ee3c:46139 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-11T20:45:17,882 DEBUG [RS:1;51ca66f7ee3c:46139 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-11T20:45:17,883 DEBUG [RS:1;51ca66f7ee3c:46139 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-11T20:45:17,884 DEBUG [RS:1;51ca66f7ee3c:46139 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1fe73e2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=51ca66f7ee3c/172.17.0.2:0 2024-11-11T20:45:17,896 DEBUG [RS:1;51ca66f7ee3c:46139 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;51ca66f7ee3c:46139 2024-11-11T20:45:17,896 INFO [RS:1;51ca66f7ee3c:46139 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-11T20:45:17,896 INFO [RS:1;51ca66f7ee3c:46139 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 
2024-11-11T20:45:17,896 DEBUG [RS:1;51ca66f7ee3c:46139 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-11T20:45:17,897 INFO [RS:1;51ca66f7ee3c:46139 {}] regionserver.HRegionServer(2659): reportForDuty to master=51ca66f7ee3c,39527,1731357916531 with port=46139, startcode=1731357917854 2024-11-11T20:45:17,897 DEBUG [RS:1;51ca66f7ee3c:46139 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-11T20:45:17,899 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49965, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-11T20:45:17,899 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39527 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 51ca66f7ee3c,46139,1731357917854 2024-11-11T20:45:17,899 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39527 {}] master.ServerManager(517): Registering regionserver=51ca66f7ee3c,46139,1731357917854 2024-11-11T20:45:17,901 DEBUG [RS:1;51ca66f7ee3c:46139 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411 2024-11-11T20:45:17,901 DEBUG [RS:1;51ca66f7ee3c:46139 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46503 2024-11-11T20:45:17,901 DEBUG [RS:1;51ca66f7ee3c:46139 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-11T20:45:17,902 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39527-0x100308899d10000, quorum=127.0.0.1:57310, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T20:45:17,903 DEBUG [RS:1;51ca66f7ee3c:46139 {}] zookeeper.ZKUtil(111): regionserver:46139-0x100308899d10002, quorum=127.0.0.1:57310, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/51ca66f7ee3c,46139,1731357917854 2024-11-11T20:45:17,903 WARN [RS:1;51ca66f7ee3c:46139 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-11T20:45:17,903 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [51ca66f7ee3c,46139,1731357917854] 2024-11-11T20:45:17,903 INFO [RS:1;51ca66f7ee3c:46139 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T20:45:17,903 DEBUG [RS:1;51ca66f7ee3c:46139 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854 2024-11-11T20:45:17,908 INFO [RS:1;51ca66f7ee3c:46139 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-11T20:45:17,910 INFO [RS:1;51ca66f7ee3c:46139 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-11T20:45:17,913 INFO [RS:1;51ca66f7ee3c:46139 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T20:45:17,913 INFO [RS:1;51ca66f7ee3c:46139 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T20:45:17,913 INFO [RS:1;51ca66f7ee3c:46139 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-11T20:45:17,914 INFO [RS:1;51ca66f7ee3c:46139 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-11T20:45:17,914 INFO [RS:1;51ca66f7ee3c:46139 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-11T20:45:17,914 DEBUG [RS:1;51ca66f7ee3c:46139 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:45:17,914 DEBUG [RS:1;51ca66f7ee3c:46139 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:45:17,914 DEBUG [RS:1;51ca66f7ee3c:46139 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:45:17,914 DEBUG [RS:1;51ca66f7ee3c:46139 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:45:17,914 DEBUG [RS:1;51ca66f7ee3c:46139 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:45:17,914 DEBUG [RS:1;51ca66f7ee3c:46139 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/51ca66f7ee3c:0, corePoolSize=2, maxPoolSize=2 2024-11-11T20:45:17,915 DEBUG [RS:1;51ca66f7ee3c:46139 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:45:17,915 DEBUG [RS:1;51ca66f7ee3c:46139 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:45:17,915 DEBUG [RS:1;51ca66f7ee3c:46139 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/51ca66f7ee3c:0, corePoolSize=1, 
maxPoolSize=1 2024-11-11T20:45:17,915 DEBUG [RS:1;51ca66f7ee3c:46139 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:45:17,915 DEBUG [RS:1;51ca66f7ee3c:46139 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:45:17,915 DEBUG [RS:1;51ca66f7ee3c:46139 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:45:17,915 DEBUG [RS:1;51ca66f7ee3c:46139 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/51ca66f7ee3c:0, corePoolSize=3, maxPoolSize=3 2024-11-11T20:45:17,915 DEBUG [RS:1;51ca66f7ee3c:46139 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/51ca66f7ee3c:0, corePoolSize=3, maxPoolSize=3 2024-11-11T20:45:17,915 INFO [RS:1;51ca66f7ee3c:46139 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T20:45:17,915 INFO [RS:1;51ca66f7ee3c:46139 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T20:45:17,916 INFO [RS:1;51ca66f7ee3c:46139 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T20:45:17,916 INFO [RS:1;51ca66f7ee3c:46139 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-11T20:45:17,916 INFO [RS:1;51ca66f7ee3c:46139 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T20:45:17,916 INFO [RS:1;51ca66f7ee3c:46139 {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,46139,1731357917854-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T20:45:17,930 INFO [RS:1;51ca66f7ee3c:46139 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T20:45:17,930 INFO [RS:1;51ca66f7ee3c:46139 {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,46139,1731357917854-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T20:45:17,930 INFO [RS:1;51ca66f7ee3c:46139 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T20:45:17,930 INFO [RS:1;51ca66f7ee3c:46139 {}] regionserver.Replication(171): 51ca66f7ee3c,46139,1731357917854 started 2024-11-11T20:45:17,943 INFO [RS:1;51ca66f7ee3c:46139 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-11T20:45:17,943 INFO [RS:1;51ca66f7ee3c:46139 {}] regionserver.HRegionServer(1482): Serving as 51ca66f7ee3c,46139,1731357917854, RpcServer on 51ca66f7ee3c/172.17.0.2:46139, sessionid=0x100308899d10002 2024-11-11T20:45:17,943 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;51ca66f7ee3c:46139,5,FailOnTimeoutGroup] 2024-11-11T20:45:17,943 DEBUG [RS:1;51ca66f7ee3c:46139 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T20:45:17,943 DEBUG [RS:1;51ca66f7ee3c:46139 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 51ca66f7ee3c,46139,1731357917854 2024-11-11T20:45:17,943 DEBUG [RS:1;51ca66f7ee3c:46139 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '51ca66f7ee3c,46139,1731357917854' 2024-11-11T20:45:17,943 DEBUG [RS:1;51ca66f7ee3c:46139 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T20:45:17,943 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-11T20:45:17,944 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-11T20:45:17,944 DEBUG [RS:1;51ca66f7ee3c:46139 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T20:45:17,944 DEBUG [RS:1;51ca66f7ee3c:46139 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T20:45:17,945 DEBUG [RS:1;51ca66f7ee3c:46139 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T20:45:17,945 DEBUG [RS:1;51ca66f7ee3c:46139 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 51ca66f7ee3c,46139,1731357917854 2024-11-11T20:45:17,945 DEBUG [RS:1;51ca66f7ee3c:46139 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '51ca66f7ee3c,46139,1731357917854' 2024-11-11T20:45:17,945 DEBUG [RS:1;51ca66f7ee3c:46139 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-11T20:45:17,945 DEBUG [RS:1;51ca66f7ee3c:46139 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T20:45:17,945 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 51ca66f7ee3c,39527,1731357916531 2024-11-11T20:45:17,945 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@18ef1e1 2024-11-11T20:45:17,945 DEBUG [RS:1;51ca66f7ee3c:46139 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T20:45:17,946 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-11T20:45:17,946 INFO [RS:1;51ca66f7ee3c:46139 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-11T20:45:17,946 INFO [RS:1;51ca66f7ee3c:46139 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-11T20:45:17,948 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37672, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-11T20:45:17,948 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39527 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-11T20:45:17,948 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39527 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-11T20:45:17,949 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39527 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T20:45:17,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39527 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-11T20:45:17,952 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-11T20:45:17,952 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:45:17,952 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39527 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-11T20:45:17,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39527 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T20:45:17,953 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-11T20:45:17,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39897 is added to blk_1073741835_1011 (size=393) 2024-11-11T20:45:17,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33779 is added to blk_1073741835_1011 (size=393) 2024-11-11T20:45:17,963 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 1e16cb99ec81e6ac94635a2a5c2ce30d, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731357917948.1e16cb99ec81e6ac94635a2a5c2ce30d.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411 2024-11-11T20:45:17,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39897 is added to blk_1073741836_1012 (size=76) 2024-11-11T20:45:17,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33779 is added to blk_1073741836_1012 (size=76) 2024-11-11T20:45:17,972 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731357917948.1e16cb99ec81e6ac94635a2a5c2ce30d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T20:45:17,972 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing 1e16cb99ec81e6ac94635a2a5c2ce30d, disabling compactions & flushes 2024-11-11T20:45:17,972 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731357917948.1e16cb99ec81e6ac94635a2a5c2ce30d. 2024-11-11T20:45:17,972 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731357917948.1e16cb99ec81e6ac94635a2a5c2ce30d. 2024-11-11T20:45:17,972 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731357917948.1e16cb99ec81e6ac94635a2a5c2ce30d. after waiting 0 ms 2024-11-11T20:45:17,972 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731357917948.1e16cb99ec81e6ac94635a2a5c2ce30d. 2024-11-11T20:45:17,972 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731357917948.1e16cb99ec81e6ac94635a2a5c2ce30d. 
2024-11-11T20:45:17,972 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for 1e16cb99ec81e6ac94635a2a5c2ce30d: Waiting for close lock at 1731357917972Disabling compacts and flushes for region at 1731357917972Disabling writes for close at 1731357917972Writing region close event to WAL at 1731357917972Closed at 1731357917972 2024-11-11T20:45:17,974 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-11T20:45:17,974 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1731357917948.1e16cb99ec81e6ac94635a2a5c2ce30d.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1731357917974"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731357917974"}]},"ts":"1731357917974"} 2024-11-11T20:45:17,977 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-11T20:45:17,978 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-11T20:45:17,979 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731357917978"}]},"ts":"1731357917978"} 2024-11-11T20:45:17,981 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-11T20:45:17,981 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=1e16cb99ec81e6ac94635a2a5c2ce30d, ASSIGN}] 2024-11-11T20:45:17,983 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=1e16cb99ec81e6ac94635a2a5c2ce30d, ASSIGN 2024-11-11T20:45:17,984 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=1e16cb99ec81e6ac94635a2a5c2ce30d, ASSIGN; state=OFFLINE, location=51ca66f7ee3c,38547,1731357916578; forceNewPlan=false, retain=false 2024-11-11T20:45:18,050 INFO [RS:1;51ca66f7ee3c:46139 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=51ca66f7ee3c%2C46139%2C1731357917854, suffix=, logDir=hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854, archiveDir=hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/oldWALs, maxLogs=32 2024-11-11T20:45:18,051 INFO [RS:1;51ca66f7ee3c:46139 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 2024-11-11T20:45:18,060 INFO [RS:1;51ca66f7ee3c:46139 {}] wal.AbstractFSWAL(991): New 
WAL /user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 2024-11-11T20:45:18,061 DEBUG [RS:1;51ca66f7ee3c:46139 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34011:34011),(127.0.0.1/127.0.0.1:34609:34609)] 2024-11-11T20:45:18,135 INFO [51ca66f7ee3c:39527 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-11T20:45:18,136 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=1e16cb99ec81e6ac94635a2a5c2ce30d, regionState=OPENING, regionLocation=51ca66f7ee3c,38547,1731357916578 2024-11-11T20:45:18,141 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=1e16cb99ec81e6ac94635a2a5c2ce30d, ASSIGN because future has completed 2024-11-11T20:45:18,142 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1e16cb99ec81e6ac94635a2a5c2ce30d, server=51ca66f7ee3c,38547,1731357916578}] 2024-11-11T20:45:18,306 INFO [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1731357917948.1e16cb99ec81e6ac94635a2a5c2ce30d. 2024-11-11T20:45:18,306 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 1e16cb99ec81e6ac94635a2a5c2ce30d, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731357917948.1e16cb99ec81e6ac94635a2a5c2ce30d.', STARTKEY => '', ENDKEY => ''} 2024-11-11T20:45:18,307 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 1e16cb99ec81e6ac94635a2a5c2ce30d 2024-11-11T20:45:18,307 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731357917948.1e16cb99ec81e6ac94635a2a5c2ce30d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T20:45:18,308 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 1e16cb99ec81e6ac94635a2a5c2ce30d 2024-11-11T20:45:18,308 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 1e16cb99ec81e6ac94635a2a5c2ce30d 2024-11-11T20:45:18,310 INFO [StoreOpener-1e16cb99ec81e6ac94635a2a5c2ce30d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1e16cb99ec81e6ac94635a2a5c2ce30d 2024-11-11T20:45:18,312 INFO [StoreOpener-1e16cb99ec81e6ac94635a2a5c2ce30d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files 
[minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1e16cb99ec81e6ac94635a2a5c2ce30d columnFamilyName info 2024-11-11T20:45:18,312 DEBUG [StoreOpener-1e16cb99ec81e6ac94635a2a5c2ce30d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:45:18,313 INFO [StoreOpener-1e16cb99ec81e6ac94635a2a5c2ce30d-1 {}] regionserver.HStore(327): Store=1e16cb99ec81e6ac94635a2a5c2ce30d/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T20:45:18,313 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 1e16cb99ec81e6ac94635a2a5c2ce30d 2024-11-11T20:45:18,315 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d 2024-11-11T20:45:18,315 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d 2024-11-11T20:45:18,316 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 1e16cb99ec81e6ac94635a2a5c2ce30d 2024-11-11T20:45:18,316 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 1e16cb99ec81e6ac94635a2a5c2ce30d 2024-11-11T20:45:18,318 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 1e16cb99ec81e6ac94635a2a5c2ce30d 2024-11-11T20:45:18,320 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T20:45:18,321 INFO [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 1e16cb99ec81e6ac94635a2a5c2ce30d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=813281, jitterRate=0.034141361713409424}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-11T20:45:18,321 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): 
Running coprocessor post-open hooks for 1e16cb99ec81e6ac94635a2a5c2ce30d 2024-11-11T20:45:18,321 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 1e16cb99ec81e6ac94635a2a5c2ce30d: Running coprocessor pre-open hook at 1731357918308Writing region info on filesystem at 1731357918308Initializing all the Stores at 1731357918309 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731357918309Cleaning up temporary data from old regions at 1731357918316 (+7 ms)Running coprocessor post-open hooks at 1731357918321 (+5 ms)Region opened successfully at 1731357918321 2024-11-11T20:45:18,323 INFO [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1731357917948.1e16cb99ec81e6ac94635a2a5c2ce30d., pid=6, masterSystemTime=1731357918300 2024-11-11T20:45:18,325 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1731357917948.1e16cb99ec81e6ac94635a2a5c2ce30d. 2024-11-11T20:45:18,325 INFO [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1731357917948.1e16cb99ec81e6ac94635a2a5c2ce30d. 
2024-11-11T20:45:18,327 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=1e16cb99ec81e6ac94635a2a5c2ce30d, regionState=OPEN, openSeqNum=2, regionLocation=51ca66f7ee3c,38547,1731357916578 2024-11-11T20:45:18,329 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1e16cb99ec81e6ac94635a2a5c2ce30d, server=51ca66f7ee3c,38547,1731357916578 because future has completed 2024-11-11T20:45:18,333 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-11T20:45:18,333 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 1e16cb99ec81e6ac94635a2a5c2ce30d, server=51ca66f7ee3c,38547,1731357916578 in 189 msec 2024-11-11T20:45:18,336 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-11T20:45:18,336 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=1e16cb99ec81e6ac94635a2a5c2ce30d, ASSIGN in 352 msec 2024-11-11T20:45:18,337 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-11T20:45:18,337 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731357918337"}]},"ts":"1731357918337"} 2024-11-11T20:45:18,339 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-11T20:45:18,340 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-11T20:45:18,343 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 391 msec 2024-11-11T20:45:23,199 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-11T20:45:23,201 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:45:23,220 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:45:23,222 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:45:23,223 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:45:23,230 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-11T20:45:26,830 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-11T20:45:26,831 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-11T20:45:26,833 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-11T20:45:26,833 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-11T20:45:26,835 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T20:45:26,835 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-11T20:45:27,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39527 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T20:45:27,991 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-11T20:45:27,991 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-11T20:45:27,998 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-11T20:45:27,998 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1731357917948.1e16cb99ec81e6ac94635a2a5c2ce30d. 2024-11-11T20:45:28,012 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T20:45:28,015 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T20:45:28,016 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T20:45:28,016 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T20:45:28,016 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T20:45:28,018 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7d7fa207{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/hadoop.log.dir/,AVAILABLE} 2024-11-11T20:45:28,018 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3cc6081e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T20:45:28,111 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6333ed10{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/java.io.tmpdir/jetty-localhost-45831-hadoop-hdfs-3_4_1-tests_jar-_-any-166667760581690908/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T20:45:28,112 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@daf04c{HTTP/1.1, (http/1.1)}{localhost:45831} 2024-11-11T20:45:28,112 INFO [Time-limited test {}] server.Server(415): Started @117336ms 2024-11-11T20:45:28,113 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T20:45:28,145 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T20:45:28,149 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T20:45:28,150 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T20:45:28,150 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T20:45:28,150 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T20:45:28,150 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@71810790{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/hadoop.log.dir/,AVAILABLE} 2024-11-11T20:45:28,151 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@71bfb2ba{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T20:45:28,179 WARN [Thread-827 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data5/current/BP-797088966-172.17.0.2-1731357915882/current, will proceed with Du for space computation calculation, 2024-11-11T20:45:28,179 WARN [Thread-828 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data6/current/BP-797088966-172.17.0.2-1731357915882/current, will proceed with Du for space computation calculation, 2024-11-11T20:45:28,198 WARN [Thread-807 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T20:45:28,200 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa0f6adf8b49a7717 with lease ID 0xa8e7f64ec2ddd906: Processing first storage report for DS-55ef0139-4ca1-418c-82b8-09287d423f78 from datanode DatanodeRegistration(127.0.0.1:45495, datanodeUuid=2297c9c6-ce23-432b-b64d-89ec260b5997, infoPort=41815, infoSecurePort=0, ipcPort=44633, storageInfo=lv=-57;cid=testClusterID;nsid=502823967;c=1731357915882) 2024-11-11T20:45:28,200 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa0f6adf8b49a7717 with lease ID 0xa8e7f64ec2ddd906: from storage DS-55ef0139-4ca1-418c-82b8-09287d423f78 node DatanodeRegistration(127.0.0.1:45495, datanodeUuid=2297c9c6-ce23-432b-b64d-89ec260b5997, infoPort=41815, infoSecurePort=0, ipcPort=44633, storageInfo=lv=-57;cid=testClusterID;nsid=502823967;c=1731357915882), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T20:45:28,201 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa0f6adf8b49a7717 with lease ID 0xa8e7f64ec2ddd906: Processing first storage report for DS-7b08a3d1-746d-4094-b1cf-3297347a2b2b from datanode DatanodeRegistration(127.0.0.1:45495, datanodeUuid=2297c9c6-ce23-432b-b64d-89ec260b5997, infoPort=41815, infoSecurePort=0, ipcPort=44633, storageInfo=lv=-57;cid=testClusterID;nsid=502823967;c=1731357915882) 2024-11-11T20:45:28,201 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa0f6adf8b49a7717 with lease ID 0xa8e7f64ec2ddd906: from storage DS-7b08a3d1-746d-4094-b1cf-3297347a2b2b node DatanodeRegistration(127.0.0.1:45495, datanodeUuid=2297c9c6-ce23-432b-b64d-89ec260b5997, infoPort=41815, infoSecurePort=0, ipcPort=44633, storageInfo=lv=-57;cid=testClusterID;nsid=502823967;c=1731357915882), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T20:45:28,253 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@58f0ef15{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/java.io.tmpdir/jetty-localhost-33281-hadoop-hdfs-3_4_1-tests_jar-_-any-11851528388696054851/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T20:45:28,253 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4a3a27bb{HTTP/1.1, (http/1.1)}{localhost:33281} 2024-11-11T20:45:28,253 INFO [Time-limited test {}] server.Server(415): Started @117478ms 2024-11-11T20:45:28,254 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T20:45:28,293 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T20:45:28,296 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T20:45:28,298 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T20:45:28,298 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T20:45:28,299 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T20:45:28,301 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2801262{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/hadoop.log.dir/,AVAILABLE} 2024-11-11T20:45:28,301 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21f536ea{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T20:45:28,325 WARN [Thread-862 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data7/current/BP-797088966-172.17.0.2-1731357915882/current, will proceed with Du for space computation calculation, 2024-11-11T20:45:28,325 WARN [Thread-863 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data8/current/BP-797088966-172.17.0.2-1731357915882/current, will proceed with Du for space computation calculation, 2024-11-11T20:45:28,340 WARN [Thread-842 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T20:45:28,342 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xed5a03ce28e5cb32 with lease ID 0xa8e7f64ec2ddd907: Processing first storage report for DS-f0c35d67-70b1-48b7-ad47-e78642e496e4 from datanode DatanodeRegistration(127.0.0.1:43949, datanodeUuid=05dd4fb0-6f13-46cb-bd5b-ca9a45534487, infoPort=35277, infoSecurePort=0, ipcPort=44213, storageInfo=lv=-57;cid=testClusterID;nsid=502823967;c=1731357915882) 2024-11-11T20:45:28,342 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xed5a03ce28e5cb32 with lease ID 0xa8e7f64ec2ddd907: from storage DS-f0c35d67-70b1-48b7-ad47-e78642e496e4 node DatanodeRegistration(127.0.0.1:43949, datanodeUuid=05dd4fb0-6f13-46cb-bd5b-ca9a45534487, infoPort=35277, infoSecurePort=0, ipcPort=44213, storageInfo=lv=-57;cid=testClusterID;nsid=502823967;c=1731357915882), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T20:45:28,343 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xed5a03ce28e5cb32 with lease ID 0xa8e7f64ec2ddd907: Processing first storage report for DS-382127fd-6fc3-4abc-8ea4-0b77adcb117e from datanode DatanodeRegistration(127.0.0.1:43949, datanodeUuid=05dd4fb0-6f13-46cb-bd5b-ca9a45534487, infoPort=35277, infoSecurePort=0, ipcPort=44213, storageInfo=lv=-57;cid=testClusterID;nsid=502823967;c=1731357915882) 2024-11-11T20:45:28,343 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xed5a03ce28e5cb32 with lease ID 0xa8e7f64ec2ddd907: from storage DS-382127fd-6fc3-4abc-8ea4-0b77adcb117e node DatanodeRegistration(127.0.0.1:43949, datanodeUuid=05dd4fb0-6f13-46cb-bd5b-ca9a45534487, infoPort=35277, infoSecurePort=0, ipcPort=44213, storageInfo=lv=-57;cid=testClusterID;nsid=502823967;c=1731357915882), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T20:45:28,406 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@542c34dc{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/java.io.tmpdir/jetty-localhost-36617-hadoop-hdfs-3_4_1-tests_jar-_-any-2413474766206520384/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T20:45:28,407 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2f0a2519{HTTP/1.1, (http/1.1)}{localhost:36617} 2024-11-11T20:45:28,407 INFO [Time-limited test {}] server.Server(415): Started @117631ms 2024-11-11T20:45:28,408 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-11T20:45:28,466 WARN [Thread-888 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data9/current/BP-797088966-172.17.0.2-1731357915882/current, will proceed with Du for space computation calculation, 2024-11-11T20:45:28,466 WARN [Thread-889 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data10/current/BP-797088966-172.17.0.2-1731357915882/current, will proceed with Du for space computation calculation, 2024-11-11T20:45:28,483 WARN [Thread-877 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-11T20:45:28,486 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd923dc105ad960a2 with lease ID 0xa8e7f64ec2ddd908: Processing first storage report for DS-e3fa3e60-f541-40f6-a9fb-687b70034142 from datanode DatanodeRegistration(127.0.0.1:35329, datanodeUuid=c426f781-4da7-4c04-9fda-4773aa7ad7fa, infoPort=36909, infoSecurePort=0, ipcPort=46487, storageInfo=lv=-57;cid=testClusterID;nsid=502823967;c=1731357915882) 2024-11-11T20:45:28,486 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd923dc105ad960a2 with lease ID 0xa8e7f64ec2ddd908: from storage DS-e3fa3e60-f541-40f6-a9fb-687b70034142 node DatanodeRegistration(127.0.0.1:35329, datanodeUuid=c426f781-4da7-4c04-9fda-4773aa7ad7fa, infoPort=36909, infoSecurePort=0, ipcPort=46487, storageInfo=lv=-57;cid=testClusterID;nsid=502823967;c=1731357915882), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T20:45:28,486 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd923dc105ad960a2 with lease ID 0xa8e7f64ec2ddd908: Processing first storage report for DS-6bcf88e1-9210-49ab-8ec3-ffc0a2e5efec from datanode DatanodeRegistration(127.0.0.1:35329, datanodeUuid=c426f781-4da7-4c04-9fda-4773aa7ad7fa, infoPort=36909, infoSecurePort=0, ipcPort=46487, storageInfo=lv=-57;cid=testClusterID;nsid=502823967;c=1731357915882) 2024-11-11T20:45:28,486 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd923dc105ad960a2 with lease ID 0xa8e7f64ec2ddd908: from storage DS-6bcf88e1-9210-49ab-8ec3-ffc0a2e5efec node DatanodeRegistration(127.0.0.1:35329, datanodeUuid=c426f781-4da7-4c04-9fda-4773aa7ad7fa, infoPort=36909, infoSecurePort=0, ipcPort=46487, storageInfo=lv=-57;cid=testClusterID;nsid=502823967;c=1731357915882), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T20:45:28,527 WARN [ResponseProcessor for block BP-797088966-172.17.0.2-1731357915882:blk_1073741832_1008 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-797088966-172.17.0.2-1731357915882:blk_1073741832_1008 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:28,527 WARN [ResponseProcessor for block BP-797088966-172.17.0.2-1731357915882:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-797088966-172.17.0.2-1731357915882:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-797088966-172.17.0.2-1731357915882:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:39897,DS-8a3d5d6e-fee5-49e6-b487-a147dc8006b7,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:28,527 WARN [ResponseProcessor for block BP-797088966-172.17.0.2-1731357915882:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-797088966-172.17.0.2-1731357915882:blk_1073741837_1013 java.io.IOException: Bad response ERROR for BP-797088966-172.17.0.2-1731357915882:blk_1073741837_1013 from datanode DatanodeInfoWithStorage[127.0.0.1:39897,DS-8a3d5d6e-fee5-49e6-b487-a147dc8006b7,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:28,527 WARN [ResponseProcessor for block BP-797088966-172.17.0.2-1731357915882:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-797088966-172.17.0.2-1731357915882:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-797088966-172.17.0.2-1731357915882:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:39897,DS-8a3d5d6e-fee5-49e6-b487-a147dc8006b7,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:28,528 WARN [DataStreamer for file /user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta block BP-797088966-172.17.0.2-1731357915882:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK], DatanodeInfoWithStorage[127.0.0.1:39897,DS-8a3d5d6e-fee5-49e6-b487-a147dc8006b7,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39897,DS-8a3d5d6e-fee5-49e6-b487-a147dc8006b7,DISK]) is bad. 2024-11-11T20:45:28,528 WARN [DataStreamer for file /user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.1731357917123 block BP-797088966-172.17.0.2-1731357915882:blk_1073741832_1008 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741832_1008 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39897,DS-8a3d5d6e-fee5-49e6-b487-a147dc8006b7,DISK], DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39897,DS-8a3d5d6e-fee5-49e6-b487-a147dc8006b7,DISK]) is bad. 
2024-11-11T20:45:28,528 WARN [DataStreamer for file /user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 block BP-797088966-172.17.0.2-1731357915882:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK], DatanodeInfoWithStorage[127.0.0.1:39897,DS-8a3d5d6e-fee5-49e6-b487-a147dc8006b7,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39897,DS-8a3d5d6e-fee5-49e6-b487-a147dc8006b7,DISK]) is bad. 2024-11-11T20:45:28,529 WARN [DataStreamer for file /user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/MasterData/WALs/51ca66f7ee3c,39527,1731357916531/51ca66f7ee3c%2C39527%2C1731357916531.1731357916757 block BP-797088966-172.17.0.2-1731357915882:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK], DatanodeInfoWithStorage[127.0.0.1:39897,DS-8a3d5d6e-fee5-49e6-b487-a147dc8006b7,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39897,DS-8a3d5d6e-fee5-49e6-b487-a147dc8006b7,DISK]) is bad. 2024-11-11T20:45:28,528 WARN [PacketResponder: BP-797088966-172.17.0.2-1731357915882:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:39897] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T20:45:28,528 WARN [PacketResponder: BP-797088966-172.17.0.2-1731357915882:blk_1073741837_1013, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:39897] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] 
at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T20:45:28,529 WARN [PacketResponder: BP-797088966-172.17.0.2-1731357915882:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:39897] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T20:45:28,531 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:58866 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:33779:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58866 dst: /127.0.0.1:33779 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T20:45:28,531 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_742528423_22 at /127.0.0.1:58890 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:33779:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58890 dst: /127.0.0.1:33779 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T20:45:28,530 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:37806 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741832_1008] {}] datanode.DataXceiver(331): 127.0.0.1:39897:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37806 dst: /127.0.0.1:39897 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T20:45:28,532 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:58848 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741832_1008] {}] datanode.DataXceiver(331): 127.0.0.1:33779:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58848 dst: /127.0.0.1:33779 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T20:45:28,531 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-117607780_22 at /127.0.0.1:58830 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:33779:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58830 dst: /127.0.0.1:33779 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T20:45:28,531 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_742528423_22 at /127.0.0.1:37838 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:39897:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37838 dst: /127.0.0.1:39897 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] 
at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T20:45:28,532 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-117607780_22 at /127.0.0.1:37794 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:39897:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37794 dst: /127.0.0.1:39897 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T20:45:28,533 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@75de815b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T20:45:28,532 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:37822 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:39897:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37822 dst: /127.0.0.1:39897 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T20:45:28,534 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@fe8f7b0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T20:45:28,535 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T20:45:28,535 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6d3d4ef0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T20:45:28,535 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@75096fee{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/hadoop.log.dir/,STOPPED} 2024-11-11T20:45:28,538 WARN [BP-797088966-172.17.0.2-1731357915882 heartbeating to localhost/127.0.0.1:46503 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T20:45:28,538 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-11T20:45:28,538 WARN [BP-797088966-172.17.0.2-1731357915882 heartbeating to localhost/127.0.0.1:46503 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-797088966-172.17.0.2-1731357915882 (Datanode Uuid 7e594afa-bcac-4748-944c-21b12f1087c4) service to localhost/127.0.0.1:46503 2024-11-11T20:45:28,538 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T20:45:28,538 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data3/current/BP-797088966-172.17.0.2-1731357915882 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T20:45:28,538 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data4/current/BP-797088966-172.17.0.2-1731357915882 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T20:45:28,539 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T20:45:28,539 WARN [DataStreamer for file /user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 block BP-797088966-172.17.0.2-1731357915882:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:28,539 WARN [DataStreamer for file /user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.1731357917123 block BP-797088966-172.17.0.2-1731357915882:blk_1073741832_1008 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741832_1008 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:28,539 WARN [DataStreamer for file /user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta block BP-797088966-172.17.0.2-1731357915882:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:28,541 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@71fa2b9f {}] datanode.DataXceiver(331): 127.0.0.1:33779:DataXceiver error processing unknown operation src: /127.0.0.1:49522 dst: /127.0.0.1:33779 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T20:45:28,541 WARN [DataStreamer for file /user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/MasterData/WALs/51ca66f7ee3c,39527,1731357916531/51ca66f7ee3c%2C39527%2C1731357916531.1731357916757 block BP-797088966-172.17.0.2-1731357915882:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:28,543 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@24befc55{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T20:45:28,543 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@259c861e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T20:45:28,543 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T20:45:28,543 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@35a03ba9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T20:45:28,543 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@49b91397{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/hadoop.log.dir/,STOPPED} 2024-11-11T20:45:28,544 WARN [BP-797088966-172.17.0.2-1731357915882 heartbeating to localhost/127.0.0.1:46503 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T20:45:28,544 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T20:45:28,544 WARN [BP-797088966-172.17.0.2-1731357915882 heartbeating to localhost/127.0.0.1:46503 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-797088966-172.17.0.2-1731357915882 (Datanode Uuid 839acd95-50b4-4887-ada9-927bf6b5fa3c) service to localhost/127.0.0.1:46503 2024-11-11T20:45:28,544 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T20:45:28,545 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data1/current/BP-797088966-172.17.0.2-1731357915882 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T20:45:28,545 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data2/current/BP-797088966-172.17.0.2-1731357915882 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T20:45:28,545 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T20:45:28,549 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1731357917948.1e16cb99ec81e6ac94635a2a5c2ce30d., hostname=51ca66f7ee3c,38547,1731357916578, seqNum=2] 2024-11-11T20:45:28,550 ERROR [FSHLog-0-hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411-prefix:51ca66f7ee3c,38547,1731357916578 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:28,550 WARN [FSHLog-0-hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411-prefix:51ca66f7ee3c,38547,1731357916578 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:28,550 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:28,551 DEBUG [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 51ca66f7ee3c%2C38547%2C1731357916578:(num 1731357917123) roll requested 2024-11-11T20:45:28,551 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 51ca66f7ee3c%2C38547%2C1731357916578.1731357928551 2024-11-11T20:45:28,554 WARN [Thread-900 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741838_1018 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:28,554 WARN [Thread-900 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK], DatanodeInfoWithStorage[127.0.0.1:35329,DS-e3fa3e60-f541-40f6-a9fb-687b70034142,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]) is bad. 
2024-11-11T20:45:28,554 WARN [Thread-900 {}] hdfs.DataStreamer(1850): Abandoning BP-797088966-172.17.0.2-1731357915882:blk_1073741838_1018 2024-11-11T20:45:28,556 WARN [Thread-900 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK] 2024-11-11T20:45:28,562 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:28,563 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:28,563 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:28,563 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:28,563 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:28,563 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.1731357917123 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.1731357928551 2024-11-11T20:45:28,564 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:28,564 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T20:45:28,564 DEBUG [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41815:41815),(127.0.0.1/127.0.0.1:35277:35277)] 2024-11-11T20:45:28,564 DEBUG [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.1731357917123 is not closed yet, will try archiving it next time 2024-11-11T20:45:28,565 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-11T20:45:28,565 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-11T20:45:28,565 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.1731357917123 2024-11-11T20:45:28,568 WARN [IPC Server handler 2 on default port 46503 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.1731357917123 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741832_1008 2024-11-11T20:45:28,571 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.1731357917123 after 5ms 2024-11-11T20:45:28,703 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:29,916 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
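The Close-WAL-Writer thread above recovers the lease on the old WAL file and retries while the NameNode still reports "Lease recovery is in progress" (attempt=0 fails after 5ms here and is retried later in the log). A rough equivalent of that retry loop, sketched directly against DistributedFileSystem rather than HBase's RecoverLeaseFSUtils; the file is the WAL named in the log, and the retry count and back-off are arbitrary:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class WalLeaseRecovery {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path wal = new Path("/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/"
            + "51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.1731357917123");
        try (DistributedFileSystem dfs =
                 (DistributedFileSystem) FileSystem.get(URI.create("hdfs://localhost:46503"), conf)) {
            boolean closed = false;
            for (int attempt = 0; attempt < 5 && !closed; attempt++) {
                // recoverLease() returns true once the NameNode has closed the file;
                // while recovery is still in progress it returns false and we back off.
                closed = dfs.recoverLease(wal) || dfs.isFileClosed(wal);
                if (!closed) {
                    Thread.sleep(4000L);
                }
            }
            System.out.println("file closed: " + closed);
        }
    }
}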
2024-11-11T20:45:30,564 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:30,566 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.1731357928551 2024-11-11T20:45:30,567 WARN [ResponseProcessor for block BP-797088966-172.17.0.2-1731357915882:blk_1073741839_1019 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-797088966-172.17.0.2-1731357915882:blk_1073741839_1019 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:30,568 WARN [DataStreamer for file /user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.1731357928551 block BP-797088966-172.17.0.2-1731357915882:blk_1073741839_1019 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741839_1019 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45495,DS-55ef0139-4ca1-418c-82b8-09287d423f78,DISK], DatanodeInfoWithStorage[127.0.0.1:43949,DS-f0c35d67-70b1-48b7-ad47-e78642e496e4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45495,DS-55ef0139-4ca1-418c-82b8-09287d423f78,DISK]) is bad. 2024-11-11T20:45:30,568 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:35146 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741839_1019] {}] datanode.DataXceiver(331): 127.0.0.1:45495:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35146 dst: /127.0.0.1:45495 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] 
at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T20:45:30,569 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:36440 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741839_1019] {}] datanode.DataXceiver(331): 127.0.0.1:43949:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36440 dst: /127.0.0.1:43949 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T20:45:30,572 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6333ed10{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T20:45:30,572 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@daf04c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T20:45:30,573 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T20:45:30,573 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3cc6081e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T20:45:30,573 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7d7fa207{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/hadoop.log.dir/,STOPPED} 2024-11-11T20:45:30,576 WARN [BP-797088966-172.17.0.2-1731357915882 heartbeating to localhost/127.0.0.1:46503 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T20:45:30,576 WARN [BP-797088966-172.17.0.2-1731357915882 heartbeating to localhost/127.0.0.1:46503 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-797088966-172.17.0.2-1731357915882 (Datanode Uuid 2297c9c6-ce23-432b-b64d-89ec260b5997) service to localhost/127.0.0.1:46503 2024-11-11T20:45:30,576 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data5/current/BP-797088966-172.17.0.2-1731357915882 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T20:45:30,576 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data6/current/BP-797088966-172.17.0.2-1731357915882 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T20:45:30,576 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-11T20:45:30,576 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T20:45:30,577 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T20:45:30,704 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:31,917 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:32,565 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:32,566 WARN [regionserver/51ca66f7ee3c:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. 
current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43949,DS-f0c35d67-70b1-48b7-ad47-e78642e496e4,DISK]] 2024-11-11T20:45:32,566 DEBUG [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 51ca66f7ee3c%2C38547%2C1731357916578:(num 1731357928551) roll requested 2024-11-11T20:45:32,566 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 51ca66f7ee3c%2C38547%2C1731357916578.1731357932566 2024-11-11T20:45:32,573 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.1731357917123 after 4008ms 2024-11-11T20:45:32,574 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:38808 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741840_1022] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data10]'}, localName='127.0.0.1:35329', datanodeUuid='c426f781-4da7-4c04-9fda-4773aa7ad7fa', xmitsInProgress=0}:Exception transferring block BP-797088966-172.17.0.2-1731357915882:blk_1073741840_1022 to mirror 127.0.0.1:39897 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T20:45:32,574 WARN [Thread-909 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39897 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
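The "HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL." message above is FSHLog comparing the surviving pipeline against its minimum tolerable replication and asking for another roll. A sketch of the two configuration keys involved, with the key names as I recall them; verify the exact names and defaults against the HBase version under test:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalLowReplicationTuning {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Minimum pipeline width the WAL tolerates before it requests a roll;
        // it normally defaults to the filesystem's replication factor (2 in this test).
        conf.setInt("hbase.regionserver.hlog.tolerable.lowreplication", 2);
        // Upper bound on how many consecutive rolls are requested purely because
        // of low replication before the WAL stops asking for that reason.
        conf.setInt("hbase.regionserver.hlog.lowreplication.rolllimit", 5);
        System.out.println(conf.get("hbase.regionserver.hlog.tolerable.lowreplication"));
    }
}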
2024-11-11T20:45:32,574 WARN [Thread-909 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35329,DS-e3fa3e60-f541-40f6-a9fb-687b70034142,DISK], DatanodeInfoWithStorage[127.0.0.1:39897,DS-8a3d5d6e-fee5-49e6-b487-a147dc8006b7,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39897,DS-8a3d5d6e-fee5-49e6-b487-a147dc8006b7,DISK]) is bad. 2024-11-11T20:45:32,574 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:38808 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741840_1022] {}] datanode.BlockReceiver(316): Block 1073741840 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-11T20:45:32,574 WARN [Thread-909 {}] hdfs.DataStreamer(1850): Abandoning BP-797088966-172.17.0.2-1731357915882:blk_1073741840_1022 2024-11-11T20:45:32,575 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:38808 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741840_1022] {}] datanode.DataXceiver(331): 127.0.0.1:35329:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38808 dst: /127.0.0.1:35329 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T20:45:32,575 WARN [Thread-909 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39897,DS-8a3d5d6e-fee5-49e6-b487-a147dc8006b7,DISK] 2024-11-11T20:45:32,578 WARN [Thread-909 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45495 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T20:45:32,578 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:39602 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741841_1023] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data8]'}, localName='127.0.0.1:43949', datanodeUuid='05dd4fb0-6f13-46cb-bd5b-ca9a45534487', xmitsInProgress=0}:Exception transferring block BP-797088966-172.17.0.2-1731357915882:blk_1073741841_1023 to mirror 127.0.0.1:45495 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T20:45:32,578 WARN [Thread-909 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43949,DS-f0c35d67-70b1-48b7-ad47-e78642e496e4,DISK], DatanodeInfoWithStorage[127.0.0.1:45495,DS-55ef0139-4ca1-418c-82b8-09287d423f78,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45495,DS-55ef0139-4ca1-418c-82b8-09287d423f78,DISK]) is bad. 2024-11-11T20:45:32,578 WARN [Thread-909 {}] hdfs.DataStreamer(1850): Abandoning BP-797088966-172.17.0.2-1731357915882:blk_1073741841_1023 2024-11-11T20:45:32,578 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:39602 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741841_1023] {}] datanode.BlockReceiver(316): Block 1073741841 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-11T20:45:32,578 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:39602 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:43949:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39602 dst: /127.0.0.1:43949 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T20:45:32,579 WARN [Thread-909 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45495,DS-55ef0139-4ca1-418c-82b8-09287d423f78,DISK] 2024-11-11T20:45:32,581 WARN [Thread-909 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1024 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33779 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:32,581 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:39610 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741842_1024] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data8]'}, localName='127.0.0.1:43949', datanodeUuid='05dd4fb0-6f13-46cb-bd5b-ca9a45534487', xmitsInProgress=0}:Exception transferring block BP-797088966-172.17.0.2-1731357915882:blk_1073741842_1024 to mirror 127.0.0.1:33779 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T20:45:32,581 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-11T20:45:32,581 WARN [Thread-909 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43949,DS-f0c35d67-70b1-48b7-ad47-e78642e496e4,DISK], DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]) is bad. 2024-11-11T20:45:32,581 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:39610 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741842_1024] {}] datanode.BlockReceiver(316): Block 1073741842 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-11T20:45:32,581 WARN [Thread-909 {}] hdfs.DataStreamer(1850): Abandoning BP-797088966-172.17.0.2-1731357915882:blk_1073741842_1024 2024-11-11T20:45:32,582 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:39610 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:43949:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39610 dst: /127.0.0.1:43949 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T20:45:32,582 WARN [Thread-909 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK] 2024-11-11T20:45:32,587 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:32,587 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:32,587 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:32,587 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:32,587 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:32,587 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.1731357928551 with entries=3, filesize=3.51 KB; new WAL /user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.1731357932566 2024-11-11T20:45:32,588 DEBUG [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36909:36909),(127.0.0.1/127.0.0.1:35277:35277)] 2024-11-11T20:45:32,588 DEBUG [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.1731357917123 is not closed yet, will try archiving it next time 2024-11-11T20:45:32,588 DEBUG [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.1731357928551 is not closed yet, will try archiving it next time 2024-11-11T20:45:32,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43949 is added to blk_1073741839_1021 (size=3600) 2024-11-11T20:45:32,704 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:32,991 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.1731357917123 is not closed yet, will try archiving it next time 2024-11-11T20:45:33,917 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:34,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35329 is added to blk_1073741839_1021 (size=3600) 2024-11-11T20:45:34,586 WARN [ResponseProcessor for block BP-797088966-172.17.0.2-1731357915882:blk_1073741843_1025 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-797088966-172.17.0.2-1731357915882:blk_1073741843_1025 java.io.IOException: Bad response ERROR for BP-797088966-172.17.0.2-1731357915882:blk_1073741843_1025 from datanode DatanodeInfoWithStorage[127.0.0.1:43949,DS-f0c35d67-70b1-48b7-ad47-e78642e496e4,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:34,587 WARN [DataStreamer for file /user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.1731357932566 block BP-797088966-172.17.0.2-1731357915882:blk_1073741843_1025 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741843_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35329,DS-e3fa3e60-f541-40f6-a9fb-687b70034142,DISK], DatanodeInfoWithStorage[127.0.0.1:43949,DS-f0c35d67-70b1-48b7-ad47-e78642e496e4,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43949,DS-f0c35d67-70b1-48b7-ad47-e78642e496e4,DISK]) is bad. 2024-11-11T20:45:34,587 WARN [PacketResponder: BP-797088966-172.17.0.2-1731357915882:blk_1073741843_1025, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:43949] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T20:45:34,588 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:34,589 WARN [regionserver/51ca66f7ee3c:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35329,DS-e3fa3e60-f541-40f6-a9fb-687b70034142,DISK]] 2024-11-11T20:45:34,589 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:38824 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741843_1025] {}] datanode.DataXceiver(331): 127.0.0.1:35329:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38824 dst: /127.0.0.1:35329 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T20:45:34,589 DEBUG [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 51ca66f7ee3c%2C38547%2C1731357916578:(num 1731357932566) roll requested 2024-11-11T20:45:34,589 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:39624 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741843_1025] {}] datanode.DataXceiver(331): 127.0.0.1:43949:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39624 dst: /127.0.0.1:43949 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T20:45:34,590 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 51ca66f7ee3c%2C38547%2C1731357916578.1731357934589 2024-11-11T20:45:34,590 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@58f0ef15{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T20:45:34,591 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4a3a27bb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T20:45:34,591 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T20:45:34,591 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@71bfb2ba{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T20:45:34,591 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@71810790{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/hadoop.log.dir/,STOPPED} 2024-11-11T20:45:34,592 WARN [BP-797088966-172.17.0.2-1731357915882 heartbeating to localhost/127.0.0.1:46503 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T20:45:34,592 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-11T20:45:34,592 WARN [BP-797088966-172.17.0.2-1731357915882 heartbeating to localhost/127.0.0.1:46503 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-797088966-172.17.0.2-1731357915882 (Datanode Uuid 05dd4fb0-6f13-46cb-bd5b-ca9a45534487) service to localhost/127.0.0.1:46503 2024-11-11T20:45:34,592 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T20:45:34,592 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data7/current/BP-797088966-172.17.0.2-1731357915882 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T20:45:34,592 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data8/current/BP-797088966-172.17.0.2-1731357915882 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T20:45:34,593 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T20:45:34,595 WARN [Thread-925 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:34,595 WARN [Thread-925 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39897,DS-8a3d5d6e-fee5-49e6-b487-a147dc8006b7,DISK], DatanodeInfoWithStorage[127.0.0.1:45495,DS-55ef0139-4ca1-418c-82b8-09287d423f78,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39897,DS-8a3d5d6e-fee5-49e6-b487-a147dc8006b7,DISK]) is bad. 2024-11-11T20:45:34,595 WARN [Thread-925 {}] hdfs.DataStreamer(1850): Abandoning BP-797088966-172.17.0.2-1731357915882:blk_1073741844_1027 2024-11-11T20:45:34,596 WARN [Thread-925 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39897,DS-8a3d5d6e-fee5-49e6-b487-a147dc8006b7,DISK] 2024-11-11T20:45:34,597 WARN [Thread-925 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:34,597 WARN [Thread-925 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43949,DS-f0c35d67-70b1-48b7-ad47-e78642e496e4,DISK], DatanodeInfoWithStorage[127.0.0.1:45495,DS-55ef0139-4ca1-418c-82b8-09287d423f78,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43949,DS-f0c35d67-70b1-48b7-ad47-e78642e496e4,DISK]) is bad. 2024-11-11T20:45:34,597 WARN [Thread-925 {}] hdfs.DataStreamer(1850): Abandoning BP-797088966-172.17.0.2-1731357915882:blk_1073741845_1028 2024-11-11T20:45:34,598 WARN [Thread-925 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43949,DS-f0c35d67-70b1-48b7-ad47-e78642e496e4,DISK] 2024-11-11T20:45:34,599 WARN [Thread-925 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:34,599 WARN [Thread-925 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45495,DS-55ef0139-4ca1-418c-82b8-09287d423f78,DISK], DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45495,DS-55ef0139-4ca1-418c-82b8-09287d423f78,DISK]) is bad. 2024-11-11T20:45:34,599 WARN [Thread-925 {}] hdfs.DataStreamer(1850): Abandoning BP-797088966-172.17.0.2-1731357915882:blk_1073741846_1029 2024-11-11T20:45:34,600 WARN [Thread-925 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45495,DS-55ef0139-4ca1-418c-82b8-09287d423f78,DISK] 2024-11-11T20:45:34,602 WARN [Thread-925 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33779 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:34,602 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:38852 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741847_1030] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data10]'}, localName='127.0.0.1:35329', datanodeUuid='c426f781-4da7-4c04-9fda-4773aa7ad7fa', xmitsInProgress=0}:Exception transferring block BP-797088966-172.17.0.2-1731357915882:blk_1073741847_1030 to mirror 127.0.0.1:33779 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T20:45:34,603 WARN [Thread-925 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35329,DS-e3fa3e60-f541-40f6-a9fb-687b70034142,DISK], DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]) is bad. 2024-11-11T20:45:34,603 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:38852 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741847_1030] {}] datanode.BlockReceiver(316): Block 1073741847 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-11T20:45:34,603 WARN [Thread-925 {}] hdfs.DataStreamer(1850): Abandoning BP-797088966-172.17.0.2-1731357915882:blk_1073741847_1030 2024-11-11T20:45:34,603 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:38852 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741847_1030] {}] datanode.DataXceiver(331): 127.0.0.1:35329:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38852 dst: /127.0.0.1:35329 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
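By this point the test has stopped several datanodes, so each new block allocation keeps hitting dead targets ("Connection refused", "Excluding datanode ...") until only live nodes remain. A small sketch for checking which datanodes the NameNode still considers live, roughly what `hdfs dfsadmin -report` shows, assuming the same NameNode address as above:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;

public class LiveDatanodeReport {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        try (DistributedFileSystem dfs =
                 (DistributedFileSystem) FileSystem.get(URI.create("hdfs://localhost:46503"), conf)) {
            // Lists the datanodes the NameNode still considers alive, i.e. the only
            // valid pipeline targets left for new blocks.
            for (DatanodeInfo dn : dfs.getDataNodeStats(DatanodeReportType.LIVE)) {
                System.out.println(dn.getXferAddr() + " " + dn.getDatanodeUuid());
            }
        }
    }
}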
2024-11-11T20:45:34,603 WARN [Thread-925 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK] 2024-11-11T20:45:34,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38547 {}] regionserver.HRegion(8855): Flush requested on 1e16cb99ec81e6ac94635a2a5c2ce30d 2024-11-11T20:45:34,604 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1e16cb99ec81e6ac94635a2a5c2ce30d 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-11T20:45:34,604 WARN [IPC Server handler 4 on default port 46503 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-11T20:45:34,605 WARN [IPC Server handler 4 on default port 46503 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-11T20:45:34,605 WARN [IPC Server handler 4 on default port 46503 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-11T20:45:34,609 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:34,609 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:34,609 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:34,609 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:34,609 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:34,609 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.1731357932566 with entries=6, filesize=6.11 KB; new WAL /user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.1731357934589 2024-11-11T20:45:34,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35329 is added to blk_1073741843_1026 (size=6261) 2024-11-11T20:45:34,612 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.1731357917123 is not closed yet, will try archiving it next time 2024-11-11T20:45:34,618 DEBUG [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36909:36909)] 2024-11-11T20:45:34,618 DEBUG [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractFSWAL(879): 
hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.1731357917123 is not closed yet, will try archiving it next time 2024-11-11T20:45:34,621 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/.tmp/info/5b8589a3d1cd4a1c92b144ef914406c2 is 1080, key is row0002/info:/1731357930578/Put/seqid=0 2024-11-11T20:45:34,624 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33779 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:34,624 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:38864 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741849_1032] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data10]'}, localName='127.0.0.1:35329', datanodeUuid='c426f781-4da7-4c04-9fda-4773aa7ad7fa', xmitsInProgress=0}:Exception transferring block BP-797088966-172.17.0.2-1731357915882:blk_1073741849_1032 to mirror 127.0.0.1:33779 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T20:45:34,624 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35329,DS-e3fa3e60-f541-40f6-a9fb-687b70034142,DISK], DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]) is bad. 2024-11-11T20:45:34,625 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-797088966-172.17.0.2-1731357915882:blk_1073741849_1032 2024-11-11T20:45:34,625 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:38864 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741849_1032] {}] datanode.BlockReceiver(316): Block 1073741849 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-11T20:45:34,625 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:38864 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741849_1032] {}] datanode.DataXceiver(331): 127.0.0.1:35329:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38864 dst: /127.0.0.1:35329 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T20:45:34,625 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK] 2024-11-11T20:45:34,626 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T20:45:34,627 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43949,DS-f0c35d67-70b1-48b7-ad47-e78642e496e4,DISK], DatanodeInfoWithStorage[127.0.0.1:45495,DS-55ef0139-4ca1-418c-82b8-09287d423f78,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43949,DS-f0c35d67-70b1-48b7-ad47-e78642e496e4,DISK]) is bad. 2024-11-11T20:45:34,627 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-797088966-172.17.0.2-1731357915882:blk_1073741850_1033 2024-11-11T20:45:34,627 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43949,DS-f0c35d67-70b1-48b7-ad47-e78642e496e4,DISK] 2024-11-11T20:45:34,629 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45495 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:34,629 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:38874 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741851_1034] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data10]'}, localName='127.0.0.1:35329', datanodeUuid='c426f781-4da7-4c04-9fda-4773aa7ad7fa', xmitsInProgress=0}:Exception transferring block BP-797088966-172.17.0.2-1731357915882:blk_1073741851_1034 to mirror 127.0.0.1:45495 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T20:45:34,630 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35329,DS-e3fa3e60-f541-40f6-a9fb-687b70034142,DISK], DatanodeInfoWithStorage[127.0.0.1:45495,DS-55ef0139-4ca1-418c-82b8-09287d423f78,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45495,DS-55ef0139-4ca1-418c-82b8-09287d423f78,DISK]) is bad. 2024-11-11T20:45:34,630 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-797088966-172.17.0.2-1731357915882:blk_1073741851_1034 2024-11-11T20:45:34,630 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:38874 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741851_1034] {}] datanode.BlockReceiver(316): Block 1073741851 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-11T20:45:34,630 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:38874 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741851_1034] {}] datanode.DataXceiver(331): 127.0.0.1:35329:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38874 dst: /127.0.0.1:35329 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T20:45:34,630 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45495,DS-55ef0139-4ca1-418c-82b8-09287d423f78,DISK] 2024-11-11T20:45:34,631 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T20:45:34,632 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39897,DS-8a3d5d6e-fee5-49e6-b487-a147dc8006b7,DISK], DatanodeInfoWithStorage[127.0.0.1:35329,DS-e3fa3e60-f541-40f6-a9fb-687b70034142,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39897,DS-8a3d5d6e-fee5-49e6-b487-a147dc8006b7,DISK]) is bad. 2024-11-11T20:45:34,632 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-797088966-172.17.0.2-1731357915882:blk_1073741852_1035 2024-11-11T20:45:34,632 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39897,DS-8a3d5d6e-fee5-49e6-b487-a147dc8006b7,DISK] 2024-11-11T20:45:34,633 WARN [IPC Server handler 4 on default port 46503 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-11T20:45:34,633 WARN [IPC Server handler 4 on default port 46503 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-11T20:45:34,633 WARN [IPC Server handler 4 on default port 46503 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-11T20:45:34,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35329 is added to blk_1073741853_1036 (size=10347) 2024-11-11T20:45:34,705 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T20:45:35,039 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/.tmp/info/5b8589a3d1cd4a1c92b144ef914406c2 2024-11-11T20:45:35,053 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/.tmp/info/5b8589a3d1cd4a1c92b144ef914406c2 as hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/info/5b8589a3d1cd4a1c92b144ef914406c2 2024-11-11T20:45:35,060 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/info/5b8589a3d1cd4a1c92b144ef914406c2, entries=5, sequenceid=11, filesize=10.1 K 2024-11-11T20:45:35,061 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for 1e16cb99ec81e6ac94635a2a5c2ce30d in 458ms, sequenceid=11, compaction requested=false 2024-11-11T20:45:35,062 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1e16cb99ec81e6ac94635a2a5c2ce30d: 2024-11-11T20:45:35,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38547 {}] regionserver.HRegion(8855): Flush requested on 1e16cb99ec81e6ac94635a2a5c2ce30d 2024-11-11T20:45:35,246 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1e16cb99ec81e6ac94635a2a5c2ce30d 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-11T20:45:35,250 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/.tmp/info/d7364ba7f42d46dc88a2916caea5fd18 is 1080, key is row0007/info:/1731357934605/Put/seqid=0 2024-11-11T20:45:35,252 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T20:45:35,252 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43949,DS-f0c35d67-70b1-48b7-ad47-e78642e496e4,DISK], DatanodeInfoWithStorage[127.0.0.1:39897,DS-8a3d5d6e-fee5-49e6-b487-a147dc8006b7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43949,DS-f0c35d67-70b1-48b7-ad47-e78642e496e4,DISK]) is bad. 2024-11-11T20:45:35,253 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-797088966-172.17.0.2-1731357915882:blk_1073741854_1037 2024-11-11T20:45:35,253 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43949,DS-f0c35d67-70b1-48b7-ad47-e78642e496e4,DISK] 2024-11-11T20:45:35,255 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39897 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:35,255 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:38906 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741855_1038] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data10]'}, localName='127.0.0.1:35329', datanodeUuid='c426f781-4da7-4c04-9fda-4773aa7ad7fa', xmitsInProgress=0}:Exception transferring block BP-797088966-172.17.0.2-1731357915882:blk_1073741855_1038 to mirror 127.0.0.1:39897 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T20:45:35,255 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35329,DS-e3fa3e60-f541-40f6-a9fb-687b70034142,DISK], DatanodeInfoWithStorage[127.0.0.1:39897,DS-8a3d5d6e-fee5-49e6-b487-a147dc8006b7,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39897,DS-8a3d5d6e-fee5-49e6-b487-a147dc8006b7,DISK]) is bad. 2024-11-11T20:45:35,255 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-797088966-172.17.0.2-1731357915882:blk_1073741855_1038 2024-11-11T20:45:35,255 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:38906 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741855_1038] {}] datanode.BlockReceiver(316): Block 1073741855 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-11T20:45:35,255 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:38906 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741855_1038] {}] datanode.DataXceiver(331): 127.0.0.1:35329:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38906 dst: /127.0.0.1:35329 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T20:45:35,256 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39897,DS-8a3d5d6e-fee5-49e6-b487-a147dc8006b7,DISK] 2024-11-11T20:45:35,257 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T20:45:35,257 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45495,DS-55ef0139-4ca1-418c-82b8-09287d423f78,DISK], DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45495,DS-55ef0139-4ca1-418c-82b8-09287d423f78,DISK]) is bad. 2024-11-11T20:45:35,257 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-797088966-172.17.0.2-1731357915882:blk_1073741856_1039 2024-11-11T20:45:35,258 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45495,DS-55ef0139-4ca1-418c-82b8-09287d423f78,DISK] 2024-11-11T20:45:35,259 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:35,259 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK], DatanodeInfoWithStorage[127.0.0.1:35329,DS-e3fa3e60-f541-40f6-a9fb-687b70034142,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]) is bad. 
2024-11-11T20:45:35,259 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-797088966-172.17.0.2-1731357915882:blk_1073741857_1040 2024-11-11T20:45:35,260 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK] 2024-11-11T20:45:35,260 WARN [IPC Server handler 4 on default port 46503 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-11T20:45:35,260 WARN [IPC Server handler 4 on default port 46503 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-11T20:45:35,260 WARN [IPC Server handler 4 on default port 46503 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-11T20:45:35,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35329 is added to blk_1073741858_1041 (size=12506) 2024-11-11T20:45:35,664 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/.tmp/info/d7364ba7f42d46dc88a2916caea5fd18 2024-11-11T20:45:35,670 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/.tmp/info/d7364ba7f42d46dc88a2916caea5fd18 as hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/info/d7364ba7f42d46dc88a2916caea5fd18 2024-11-11T20:45:35,676 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/info/d7364ba7f42d46dc88a2916caea5fd18, entries=7, sequenceid=24, filesize=12.2 K 2024-11-11T20:45:35,677 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for 1e16cb99ec81e6ac94635a2a5c2ce30d in 431ms, sequenceid=24, compaction requested=false 2024-11-11T20:45:35,677 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
1e16cb99ec81e6ac94635a2a5c2ce30d: 2024-11-11T20:45:35,677 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-11T20:45:35,677 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T20:45:35,677 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/info/d7364ba7f42d46dc88a2916caea5fd18 because midkey is the same as first or last row 2024-11-11T20:45:35,918 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:36,619 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:36,619 WARN [regionserver/51ca66f7ee3c:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35329,DS-e3fa3e60-f541-40f6-a9fb-687b70034142,DISK]] 2024-11-11T20:45:36,619 DEBUG [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 51ca66f7ee3c%2C38547%2C1731357916578:(num 1731357934589) roll requested 2024-11-11T20:45:36,620 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 51ca66f7ee3c%2C38547%2C1731357916578.1731357936619 2024-11-11T20:45:36,624 WARN [Thread-943 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:36,624 WARN [Thread-943 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39897,DS-8a3d5d6e-fee5-49e6-b487-a147dc8006b7,DISK], DatanodeInfoWithStorage[127.0.0.1:43949,DS-f0c35d67-70b1-48b7-ad47-e78642e496e4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39897,DS-8a3d5d6e-fee5-49e6-b487-a147dc8006b7,DISK]) is bad. 2024-11-11T20:45:36,624 WARN [Thread-943 {}] hdfs.DataStreamer(1850): Abandoning BP-797088966-172.17.0.2-1731357915882:blk_1073741859_1042 2024-11-11T20:45:36,625 WARN [Thread-943 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39897,DS-8a3d5d6e-fee5-49e6-b487-a147dc8006b7,DISK] 2024-11-11T20:45:36,627 WARN [Thread-943 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:36,628 WARN [Thread-943 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK], DatanodeInfoWithStorage[127.0.0.1:45495,DS-55ef0139-4ca1-418c-82b8-09287d423f78,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]) is bad. 2024-11-11T20:45:36,628 WARN [Thread-943 {}] hdfs.DataStreamer(1850): Abandoning BP-797088966-172.17.0.2-1731357915882:blk_1073741860_1043 2024-11-11T20:45:36,629 WARN [Thread-943 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK] 2024-11-11T20:45:36,630 WARN [Thread-943 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:36,630 WARN [Thread-943 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43949,DS-f0c35d67-70b1-48b7-ad47-e78642e496e4,DISK], DatanodeInfoWithStorage[127.0.0.1:35329,DS-e3fa3e60-f541-40f6-a9fb-687b70034142,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43949,DS-f0c35d67-70b1-48b7-ad47-e78642e496e4,DISK]) is bad. 2024-11-11T20:45:36,630 WARN [Thread-943 {}] hdfs.DataStreamer(1850): Abandoning BP-797088966-172.17.0.2-1731357915882:blk_1073741861_1044 2024-11-11T20:45:36,631 WARN [Thread-943 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43949,DS-f0c35d67-70b1-48b7-ad47-e78642e496e4,DISK] 2024-11-11T20:45:36,633 WARN [Thread-943 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45495 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:36,633 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:38922 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741862_1045] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data10]'}, localName='127.0.0.1:35329', datanodeUuid='c426f781-4da7-4c04-9fda-4773aa7ad7fa', xmitsInProgress=0}:Exception transferring block BP-797088966-172.17.0.2-1731357915882:blk_1073741862_1045 to mirror 127.0.0.1:45495 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T20:45:36,633 WARN [Thread-943 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35329,DS-e3fa3e60-f541-40f6-a9fb-687b70034142,DISK], DatanodeInfoWithStorage[127.0.0.1:45495,DS-55ef0139-4ca1-418c-82b8-09287d423f78,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45495,DS-55ef0139-4ca1-418c-82b8-09287d423f78,DISK]) is bad. 2024-11-11T20:45:36,633 WARN [Thread-943 {}] hdfs.DataStreamer(1850): Abandoning BP-797088966-172.17.0.2-1731357915882:blk_1073741862_1045 2024-11-11T20:45:36,633 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:38922 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741862_1045] {}] datanode.BlockReceiver(316): Block 1073741862 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-11T20:45:36,634 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:38922 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741862_1045] {}] datanode.DataXceiver(331): 127.0.0.1:35329:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38922 dst: /127.0.0.1:35329 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T20:45:36,634 WARN [Thread-943 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45495,DS-55ef0139-4ca1-418c-82b8-09287d423f78,DISK] 2024-11-11T20:45:36,635 WARN [IPC Server handler 2 on default port 46503 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-11T20:45:36,635 WARN [IPC Server handler 2 on default port 46503 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-11T20:45:36,635 WARN [IPC Server handler 2 on default port 46503 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-11T20:45:36,642 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:36,642 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:36,642 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:36,642 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:36,643 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:36,643 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.1731357934589 with entries=18, filesize=18.21 KB; new WAL /user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.1731357936619 2024-11-11T20:45:36,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35329 is added to blk_1073741848_1031 (size=18655) 2024-11-11T20:45:36,652 DEBUG [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36909:36909)] 2024-11-11T20:45:36,652 DEBUG [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.1731357917123 is not closed yet, will try archiving it next time 2024-11-11T20:45:36,652 DEBUG [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.1731357934589 is not closed yet, will try archiving it next time 2024-11-11T20:45:36,652 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.1731357928551 to hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/oldWALs/51ca66f7ee3c%2C38547%2C1731357916578.1731357928551 2024-11-11T20:45:36,654 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.1731357932566 to hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/oldWALs/51ca66f7ee3c%2C38547%2C1731357916578.1731357932566 2024-11-11T20:45:36,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38547 {}] regionserver.HRegion(8855): Flush requested on 1e16cb99ec81e6ac94635a2a5c2ce30d 2024-11-11T20:45:36,669 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1e16cb99ec81e6ac94635a2a5c2ce30d 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-11T20:45:36,674 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/.tmp/info/cdae654016a64e7697352c24186a042b is 1079, key is tmprow/info:/1731357936668/Put/seqid=0 2024-11-11T20:45:36,676 WARN [Thread-948 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:36,676 WARN [Thread-948 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45495,DS-55ef0139-4ca1-418c-82b8-09287d423f78,DISK], DatanodeInfoWithStorage[127.0.0.1:35329,DS-e3fa3e60-f541-40f6-a9fb-687b70034142,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45495,DS-55ef0139-4ca1-418c-82b8-09287d423f78,DISK]) is bad. 2024-11-11T20:45:36,676 WARN [Thread-948 {}] hdfs.DataStreamer(1850): Abandoning BP-797088966-172.17.0.2-1731357915882:blk_1073741864_1047 2024-11-11T20:45:36,677 WARN [Thread-948 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45495,DS-55ef0139-4ca1-418c-82b8-09287d423f78,DISK] 2024-11-11T20:45:36,678 WARN [Thread-948 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:36,678 WARN [Thread-948 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39897,DS-8a3d5d6e-fee5-49e6-b487-a147dc8006b7,DISK], DatanodeInfoWithStorage[127.0.0.1:43949,DS-f0c35d67-70b1-48b7-ad47-e78642e496e4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39897,DS-8a3d5d6e-fee5-49e6-b487-a147dc8006b7,DISK]) is bad. 2024-11-11T20:45:36,678 WARN [Thread-948 {}] hdfs.DataStreamer(1850): Abandoning BP-797088966-172.17.0.2-1731357915882:blk_1073741865_1048 2024-11-11T20:45:36,679 WARN [Thread-948 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39897,DS-8a3d5d6e-fee5-49e6-b487-a147dc8006b7,DISK] 2024-11-11T20:45:36,680 WARN [Thread-948 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:36,680 WARN [Thread-948 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK], DatanodeInfoWithStorage[127.0.0.1:35329,DS-e3fa3e60-f541-40f6-a9fb-687b70034142,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]) is bad. 
2024-11-11T20:45:36,680 WARN [Thread-948 {}] hdfs.DataStreamer(1850): Abandoning BP-797088966-172.17.0.2-1731357915882:blk_1073741866_1049 2024-11-11T20:45:36,681 WARN [Thread-948 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK] 2024-11-11T20:45:36,682 WARN [Thread-948 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43949 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:36,682 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:38950 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741867_1050] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data10]'}, localName='127.0.0.1:35329', datanodeUuid='c426f781-4da7-4c04-9fda-4773aa7ad7fa', xmitsInProgress=0}:Exception transferring block BP-797088966-172.17.0.2-1731357915882:blk_1073741867_1050 to mirror 127.0.0.1:43949 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T20:45:36,683 WARN [Thread-948 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35329,DS-e3fa3e60-f541-40f6-a9fb-687b70034142,DISK], DatanodeInfoWithStorage[127.0.0.1:43949,DS-f0c35d67-70b1-48b7-ad47-e78642e496e4,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43949,DS-f0c35d67-70b1-48b7-ad47-e78642e496e4,DISK]) is bad. 
2024-11-11T20:45:36,683 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:38950 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741867_1050] {}] datanode.BlockReceiver(316): Block 1073741867 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-11T20:45:36,683 WARN [Thread-948 {}] hdfs.DataStreamer(1850): Abandoning BP-797088966-172.17.0.2-1731357915882:blk_1073741867_1050 2024-11-11T20:45:36,683 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:38950 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741867_1050] {}] datanode.DataXceiver(331): 127.0.0.1:35329:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38950 dst: /127.0.0.1:35329 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T20:45:36,683 WARN [Thread-948 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43949,DS-f0c35d67-70b1-48b7-ad47-e78642e496e4,DISK] 2024-11-11T20:45:36,684 WARN [IPC Server handler 0 on default port 46503 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-11T20:45:36,684 WARN [IPC Server handler 0 on default port 46503 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-11T20:45:36,684 WARN [IPC Server handler 0 on default port 46503 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-11T20:45:36,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35329 is added to blk_1073741868_1051 (size=6027) 2024-11-11T20:45:36,705 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T20:45:37,045 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.1731357917123 is not closed yet, will try archiving it next time 2024-11-11T20:45:37,088 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/.tmp/info/cdae654016a64e7697352c24186a042b 2024-11-11T20:45:37,096 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/.tmp/info/cdae654016a64e7697352c24186a042b as hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/info/cdae654016a64e7697352c24186a042b 2024-11-11T20:45:37,103 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/info/cdae654016a64e7697352c24186a042b, entries=1, sequenceid=34, filesize=5.9 K 2024-11-11T20:45:37,105 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 1e16cb99ec81e6ac94635a2a5c2ce30d in 435ms, sequenceid=34, compaction requested=true 2024-11-11T20:45:37,105 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1e16cb99ec81e6ac94635a2a5c2ce30d: 2024-11-11T20:45:37,105 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-11T20:45:37,105 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T20:45:37,105 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/info/d7364ba7f42d46dc88a2916caea5fd18 because midkey is the same as first or last row 2024-11-11T20:45:37,105 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1e16cb99ec81e6ac94635a2a5c2ce30d:info, priority=-2147483648, current under compaction store size is 1 2024-11-11T20:45:37,105 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T20:45:37,105 DEBUG [RS:0;51ca66f7ee3c:38547-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T20:45:37,107 DEBUG [RS:0;51ca66f7ee3c:38547-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T20:45:37,107 DEBUG 
[RS:0;51ca66f7ee3c:38547-shortCompactions-0 {}] regionserver.HStore(1541): 1e16cb99ec81e6ac94635a2a5c2ce30d/info is initiating minor compaction (all files) 2024-11-11T20:45:37,107 INFO [RS:0;51ca66f7ee3c:38547-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 1e16cb99ec81e6ac94635a2a5c2ce30d/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731357917948.1e16cb99ec81e6ac94635a2a5c2ce30d. 2024-11-11T20:45:37,107 INFO [RS:0;51ca66f7ee3c:38547-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/info/5b8589a3d1cd4a1c92b144ef914406c2, hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/info/d7364ba7f42d46dc88a2916caea5fd18, hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/info/cdae654016a64e7697352c24186a042b] into tmpdir=hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/.tmp, totalSize=28.2 K 2024-11-11T20:45:37,108 DEBUG [RS:0;51ca66f7ee3c:38547-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5b8589a3d1cd4a1c92b144ef914406c2, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731357930578 2024-11-11T20:45:37,108 DEBUG [RS:0;51ca66f7ee3c:38547-shortCompactions-0 {}] compactions.Compactor(225): Compacting d7364ba7f42d46dc88a2916caea5fd18, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1731357934605 2024-11-11T20:45:37,109 DEBUG [RS:0;51ca66f7ee3c:38547-shortCompactions-0 {}] compactions.Compactor(225): Compacting cdae654016a64e7697352c24186a042b, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731357936668 2024-11-11T20:45:37,124 INFO [RS:0;51ca66f7ee3c:38547-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1e16cb99ec81e6ac94635a2a5c2ce30d#info#compaction#21 average throughput is 12.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T20:45:37,124 DEBUG [RS:0;51ca66f7ee3c:38547-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/.tmp/info/ce50be15af25431fa55cddf01b12be2c is 1080, key is row0002/info:/1731357930578/Put/seqid=0 2024-11-11T20:45:37,126 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:37,126 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43949,DS-f0c35d67-70b1-48b7-ad47-e78642e496e4,DISK], DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43949,DS-f0c35d67-70b1-48b7-ad47-e78642e496e4,DISK]) is bad. 2024-11-11T20:45:37,126 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-797088966-172.17.0.2-1731357915882:blk_1073741869_1052 2024-11-11T20:45:37,127 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43949,DS-f0c35d67-70b1-48b7-ad47-e78642e496e4,DISK] 2024-11-11T20:45:37,128 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:37,128 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45495,DS-55ef0139-4ca1-418c-82b8-09287d423f78,DISK], DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45495,DS-55ef0139-4ca1-418c-82b8-09287d423f78,DISK]) is bad. 2024-11-11T20:45:37,128 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-797088966-172.17.0.2-1731357915882:blk_1073741870_1053 2024-11-11T20:45:37,128 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45495,DS-55ef0139-4ca1-418c-82b8-09287d423f78,DISK] 2024-11-11T20:45:37,130 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:37,130 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK], DatanodeInfoWithStorage[127.0.0.1:35329,DS-e3fa3e60-f541-40f6-a9fb-687b70034142,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]) is bad. 2024-11-11T20:45:37,130 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-797088966-172.17.0.2-1731357915882:blk_1073741871_1054 2024-11-11T20:45:37,131 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK] 2024-11-11T20:45:37,133 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39897 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:37,133 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:38980 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741872_1055] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data10]'}, localName='127.0.0.1:35329', datanodeUuid='c426f781-4da7-4c04-9fda-4773aa7ad7fa', xmitsInProgress=0}:Exception transferring block BP-797088966-172.17.0.2-1731357915882:blk_1073741872_1055 to mirror 127.0.0.1:39897 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T20:45:37,133 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35329,DS-e3fa3e60-f541-40f6-a9fb-687b70034142,DISK], DatanodeInfoWithStorage[127.0.0.1:39897,DS-8a3d5d6e-fee5-49e6-b487-a147dc8006b7,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39897,DS-8a3d5d6e-fee5-49e6-b487-a147dc8006b7,DISK]) is bad. 2024-11-11T20:45:37,133 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:38980 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741872_1055] {}] datanode.BlockReceiver(316): Block 1073741872 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-11T20:45:37,133 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-797088966-172.17.0.2-1731357915882:blk_1073741872_1055 2024-11-11T20:45:37,133 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:38980 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741872_1055] {}] datanode.DataXceiver(331): 127.0.0.1:35329:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38980 dst: /127.0.0.1:35329 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T20:45:37,134 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39897,DS-8a3d5d6e-fee5-49e6-b487-a147dc8006b7,DISK] 2024-11-11T20:45:37,134 WARN [IPC Server handler 2 on default port 46503 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-11T20:45:37,134 WARN [IPC Server handler 2 on default port 46503 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-11T20:45:37,134 WARN [IPC Server handler 2 on default port 46503 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-11T20:45:37,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35329 is added to blk_1073741873_1056 (size=17994) 2024-11-11T20:45:37,492 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@56b48c83[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35329, datanodeUuid=c426f781-4da7-4c04-9fda-4773aa7ad7fa, infoPort=36909, infoSecurePort=0, ipcPort=46487, storageInfo=lv=-57;cid=testClusterID;nsid=502823967;c=1731357915882):Failed to transfer BP-797088966-172.17.0.2-1731357915882:blk_1073741843_1026 to 127.0.0.1:45495 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T20:45:37,492 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@528b128d[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35329, datanodeUuid=c426f781-4da7-4c04-9fda-4773aa7ad7fa, infoPort=36909, infoSecurePort=0, ipcPort=46487, storageInfo=lv=-57;cid=testClusterID;nsid=502823967;c=1731357915882):Failed to transfer BP-797088966-172.17.0.2-1731357915882:blk_1073741853_1036 to 127.0.0.1:33779 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T20:45:37,547 DEBUG [RS:0;51ca66f7ee3c:38547-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/.tmp/info/ce50be15af25431fa55cddf01b12be2c as hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/info/ce50be15af25431fa55cddf01b12be2c 2024-11-11T20:45:37,556 INFO [RS:0;51ca66f7ee3c:38547-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 1e16cb99ec81e6ac94635a2a5c2ce30d/info of 1e16cb99ec81e6ac94635a2a5c2ce30d into ce50be15af25431fa55cddf01b12be2c(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T20:45:37,556 DEBUG [RS:0;51ca66f7ee3c:38547-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 1e16cb99ec81e6ac94635a2a5c2ce30d: 2024-11-11T20:45:37,556 INFO [RS:0;51ca66f7ee3c:38547-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731357917948.1e16cb99ec81e6ac94635a2a5c2ce30d., storeName=1e16cb99ec81e6ac94635a2a5c2ce30d/info, priority=13, startTime=1731357937105; duration=0sec 2024-11-11T20:45:37,556 DEBUG [RS:0;51ca66f7ee3c:38547-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-11T20:45:37,556 DEBUG [RS:0;51ca66f7ee3c:38547-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T20:45:37,556 DEBUG [RS:0;51ca66f7ee3c:38547-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/info/ce50be15af25431fa55cddf01b12be2c because midkey is the same as first or last row 2024-11-11T20:45:37,556 DEBUG [RS:0;51ca66f7ee3c:38547-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-11T20:45:37,556 DEBUG [RS:0;51ca66f7ee3c:38547-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T20:45:37,556 DEBUG [RS:0;51ca66f7ee3c:38547-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/info/ce50be15af25431fa55cddf01b12be2c because midkey is the same as first or last row 2024-11-11T20:45:37,556 DEBUG [RS:0;51ca66f7ee3c:38547-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-11T20:45:37,556 DEBUG [RS:0;51ca66f7ee3c:38547-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T20:45:37,556 DEBUG [RS:0;51ca66f7ee3c:38547-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/info/ce50be15af25431fa55cddf01b12be2c because midkey is the same as first or last row 2024-11-11T20:45:37,556 DEBUG [RS:0;51ca66f7ee3c:38547-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T20:45:37,556 DEBUG [RS:0;51ca66f7ee3c:38547-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1e16cb99ec81e6ac94635a2a5c2ce30d:info 2024-11-11T20:45:37,918 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:38,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38547 {}] regionserver.HRegion(8855): Flush requested on 1e16cb99ec81e6ac94635a2a5c2ce30d 2024-11-11T20:45:38,094 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1e16cb99ec81e6ac94635a2a5c2ce30d 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-11T20:45:38,102 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/.tmp/info/5d60da57ff8346f5b265d98e4a56ee9c is 1079, key is tmprow/info:/1731357938093/Put/seqid=0 2024-11-11T20:45:38,105 WARN [Thread-963 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:38,105 WARN [Thread-963 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43949,DS-f0c35d67-70b1-48b7-ad47-e78642e496e4,DISK], DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43949,DS-f0c35d67-70b1-48b7-ad47-e78642e496e4,DISK]) is bad. 2024-11-11T20:45:38,105 WARN [Thread-963 {}] hdfs.DataStreamer(1850): Abandoning BP-797088966-172.17.0.2-1731357915882:blk_1073741874_1057 2024-11-11T20:45:38,106 WARN [Thread-963 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43949,DS-f0c35d67-70b1-48b7-ad47-e78642e496e4,DISK] 2024-11-11T20:45:38,107 WARN [Thread-963 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:38,107 WARN [Thread-963 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45495,DS-55ef0139-4ca1-418c-82b8-09287d423f78,DISK], DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45495,DS-55ef0139-4ca1-418c-82b8-09287d423f78,DISK]) is bad. 2024-11-11T20:45:38,107 WARN [Thread-963 {}] hdfs.DataStreamer(1850): Abandoning BP-797088966-172.17.0.2-1731357915882:blk_1073741875_1058 2024-11-11T20:45:38,108 WARN [Thread-963 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45495,DS-55ef0139-4ca1-418c-82b8-09287d423f78,DISK] 2024-11-11T20:45:38,109 WARN [Thread-963 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:38,109 WARN [Thread-963 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK], DatanodeInfoWithStorage[127.0.0.1:39897,DS-8a3d5d6e-fee5-49e6-b487-a147dc8006b7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]) is bad. 2024-11-11T20:45:38,109 WARN [Thread-963 {}] hdfs.DataStreamer(1850): Abandoning BP-797088966-172.17.0.2-1731357915882:blk_1073741876_1059 2024-11-11T20:45:38,109 WARN [Thread-963 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK] 2024-11-11T20:45:38,112 WARN [Thread-963 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39897 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:38,112 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:38996 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741877_1060] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data10]'}, localName='127.0.0.1:35329', datanodeUuid='c426f781-4da7-4c04-9fda-4773aa7ad7fa', xmitsInProgress=0}:Exception transferring block BP-797088966-172.17.0.2-1731357915882:blk_1073741877_1060 to mirror 127.0.0.1:39897 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T20:45:38,112 WARN [Thread-963 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35329,DS-e3fa3e60-f541-40f6-a9fb-687b70034142,DISK], DatanodeInfoWithStorage[127.0.0.1:39897,DS-8a3d5d6e-fee5-49e6-b487-a147dc8006b7,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39897,DS-8a3d5d6e-fee5-49e6-b487-a147dc8006b7,DISK]) is bad. 2024-11-11T20:45:38,112 WARN [Thread-963 {}] hdfs.DataStreamer(1850): Abandoning BP-797088966-172.17.0.2-1731357915882:blk_1073741877_1060 2024-11-11T20:45:38,112 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:38996 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741877_1060] {}] datanode.BlockReceiver(316): Block 1073741877 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-11T20:45:38,112 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:38996 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741877_1060] {}] datanode.DataXceiver(331): 127.0.0.1:35329:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38996 dst: /127.0.0.1:35329 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T20:45:38,113 WARN [Thread-963 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39897,DS-8a3d5d6e-fee5-49e6-b487-a147dc8006b7,DISK] 2024-11-11T20:45:38,113 WARN [IPC Server handler 1 on default port 46503 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-11T20:45:38,113 WARN [IPC Server handler 1 on default port 46503 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-11T20:45:38,113 WARN [IPC Server handler 1 on default port 46503 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-11T20:45:38,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35329 is added to blk_1073741878_1061 (size=6027) 2024-11-11T20:45:38,495 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@528b128d[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35329, datanodeUuid=c426f781-4da7-4c04-9fda-4773aa7ad7fa, infoPort=36909, infoSecurePort=0, ipcPort=46487, storageInfo=lv=-57;cid=testClusterID;nsid=502823967;c=1731357915882):Failed to transfer BP-797088966-172.17.0.2-1731357915882:blk_1073741848_1031 to 127.0.0.1:45495 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T20:45:38,495 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@56b48c83[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35329, datanodeUuid=c426f781-4da7-4c04-9fda-4773aa7ad7fa, infoPort=36909, infoSecurePort=0, ipcPort=46487, storageInfo=lv=-57;cid=testClusterID;nsid=502823967;c=1731357915882):Failed to transfer BP-797088966-172.17.0.2-1731357915882:blk_1073741858_1041 to 127.0.0.1:33779 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T20:45:38,518 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/.tmp/info/5d60da57ff8346f5b265d98e4a56ee9c 2024-11-11T20:45:38,530 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/.tmp/info/5d60da57ff8346f5b265d98e4a56ee9c as hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/info/5d60da57ff8346f5b265d98e4a56ee9c 2024-11-11T20:45:38,538 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/info/5d60da57ff8346f5b265d98e4a56ee9c, entries=1, sequenceid=45, filesize=5.9 K 2024-11-11T20:45:38,539 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 1e16cb99ec81e6ac94635a2a5c2ce30d in 445ms, sequenceid=45, compaction requested=false 2024-11-11T20:45:38,539 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1e16cb99ec81e6ac94635a2a5c2ce30d: 2024-11-11T20:45:38,539 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-11T20:45:38,539 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T20:45:38,540 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/info/ce50be15af25431fa55cddf01b12be2c because midkey is the same as first or last row 2024-11-11T20:45:38,653 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:38,653 WARN [regionserver/51ca66f7ee3c:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 
2024-11-11T20:45:38,706 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]] are bad. Aborting...
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-11T20:45:38,717 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-11T20:45:38,721 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-11T20:45:38,725 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-11T20:45:38,725 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-11T20:45:38,725 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-11T20:45:38,726 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@17a19f67{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/hadoop.log.dir/,AVAILABLE}
2024-11-11T20:45:38,726 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6e87f24b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-11T20:45:38,819 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@70a14b1c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/java.io.tmpdir/jetty-localhost-36249-hadoop-hdfs-3_4_1-tests_jar-_-any-16611949182684992505/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-11T20:45:38,820 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1929003e{HTTP/1.1, (http/1.1)}{localhost:36249}
2024-11-11T20:45:38,820 INFO [Time-limited test {}] server.Server(415): Started @128044ms
2024-11-11T20:45:38,821 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-11T20:45:38,897 WARN [Thread-982 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-11T20:45:38,903 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe5a55189ef7c0e0e with lease ID 0xa8e7f64ec2ddd909: from storage DS-8a3d5d6e-fee5-49e6-b487-a147dc8006b7 node DatanodeRegistration(127.0.0.1:42575, datanodeUuid=7e594afa-bcac-4748-944c-21b12f1087c4, infoPort=33083, infoSecurePort=0, ipcPort=41467, storageInfo=lv=-57;cid=testClusterID;nsid=502823967;c=1731357915882), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-11T20:45:38,904 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe5a55189ef7c0e0e with lease ID 0xa8e7f64ec2ddd909: from storage DS-62aafb6d-a6de-47ab-852b-4940234b595e node DatanodeRegistration(127.0.0.1:42575, datanodeUuid=7e594afa-bcac-4748-944c-21b12f1087c4, infoPort=33083, infoSecurePort=0, ipcPort=41467, storageInfo=lv=-57;cid=testClusterID;nsid=502823967;c=1731357915882), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-11T20:45:39,919 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]] are bad. Aborting...
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-11T20:45:40,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42575 is added to blk_1073741873_1056 (size=17994)
2024-11-11T20:45:40,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42575 is added to blk_1073741868_1051 (size=6027)
2024-11-11T20:45:40,653 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]] are bad. Aborting...
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-11T20:45:40,707 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]] are bad. Aborting...
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:41,491 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@56b48c83[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35329, datanodeUuid=c426f781-4da7-4c04-9fda-4773aa7ad7fa, infoPort=36909, infoSecurePort=0, ipcPort=46487, storageInfo=lv=-57;cid=testClusterID;nsid=502823967;c=1731357915882):Failed to transfer BP-797088966-172.17.0.2-1731357915882:blk_1073741878_1061 to 127.0.0.1:43949 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T20:45:41,920 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:42,654 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:42,707 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:43,920 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:44,655 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:44,708 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:45,920 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:46,512 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-11T20:45:46,655 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:46,708 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:46,890 ERROR [FSHLog-0-hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/MasterData-prefix:51ca66f7ee3c,39527,1731357916531 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T20:45:46,890 WARN [FSHLog-0-hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/MasterData-prefix:51ca66f7ee3c,39527,1731357916531 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:46,890 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 51ca66f7ee3c%2C39527%2C1731357916531:(num 1731357916757) roll requested 2024-11-11T20:45:46,891 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 51ca66f7ee3c%2C39527%2C1731357916531.1731357946890 2024-11-11T20:45:46,894 WARN [Thread-1004 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:46,894 WARN [Thread-1004 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43949,DS-f0c35d67-70b1-48b7-ad47-e78642e496e4,DISK], DatanodeInfoWithStorage[127.0.0.1:35329,DS-e3fa3e60-f541-40f6-a9fb-687b70034142,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43949,DS-f0c35d67-70b1-48b7-ad47-e78642e496e4,DISK]) is bad. 
2024-11-11T20:45:46,895 WARN [Thread-1004 {}] hdfs.DataStreamer(1850): Abandoning BP-797088966-172.17.0.2-1731357915882:blk_1073741879_1062 2024-11-11T20:45:46,895 WARN [Thread-1004 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43949,DS-f0c35d67-70b1-48b7-ad47-e78642e496e4,DISK] 2024-11-11T20:45:46,901 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:46,901 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:46,901 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:46,902 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:46,902 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:46,902 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/MasterData/WALs/51ca66f7ee3c,39527,1731357916531/51ca66f7ee3c%2C39527%2C1731357916531.1731357916757 with entries=54, filesize=26.65 KB; new WAL /user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/MasterData/WALs/51ca66f7ee3c,39527,1731357916531/51ca66f7ee3c%2C39527%2C1731357916531.1731357946890 2024-11-11T20:45:46,903 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:46,903 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T20:45:46,903 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/MasterData/WALs/51ca66f7ee3c,39527,1731357916531/51ca66f7ee3c%2C39527%2C1731357916531.1731357916757 2024-11-11T20:45:46,903 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36909:36909),(127.0.0.1/127.0.0.1:33083:33083)] 2024-11-11T20:45:46,903 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/MasterData/WALs/51ca66f7ee3c,39527,1731357916531/51ca66f7ee3c%2C39527%2C1731357916531.1731357916757 is not closed yet, will try archiving it next time 2024-11-11T20:45:46,903 WARN [IPC Server handler 4 on default port 46503 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/MasterData/WALs/51ca66f7ee3c,39527,1731357916531/51ca66f7ee3c%2C39527%2C1731357916531.1731357916757 has not been closed. Lease recovery is in progress. RecoveryId = 1064 for block blk_1073741830_1006 2024-11-11T20:45:46,904 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/MasterData/WALs/51ca66f7ee3c,39527,1731357916531/51ca66f7ee3c%2C39527%2C1731357916531.1731357916757 after 1ms 2024-11-11T20:45:47,921 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:48,656 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T20:45:48,920 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@5c8459ce {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-797088966-172.17.0.2-1731357915882:blk_1073741832_1008, datanode=DatanodeInfoWithStorage[127.0.0.1:33779,null,null]) java.net.ConnectException: Call From 51ca66f7ee3c/172.17.0.2 to localhost:37701 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-11T20:45:48,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42575 is added to blk_1073741832_1020 (size=455) 2024-11-11T20:45:49,598 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.1731357917123 to hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/oldWALs/51ca66f7ee3c%2C38547%2C1731357916578.1731357917123 2024-11-11T20:45:49,601 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.1731357934589 to hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/oldWALs/51ca66f7ee3c%2C38547%2C1731357916578.1731357934589 2024-11-11T20:45:49,905 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6d126609[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:42575, datanodeUuid=7e594afa-bcac-4748-944c-21b12f1087c4, infoPort=33083, infoSecurePort=0, ipcPort=41467, storageInfo=lv=-57;cid=testClusterID;nsid=502823967;c=1731357915882):Failed to transfer BP-797088966-172.17.0.2-1731357915882:blk_1073741832_1020 to 127.0.0.1:43949 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T20:45:49,922 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T20:45:50,657 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:50,906 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/MasterData/WALs/51ca66f7ee3c,39527,1731357916531/51ca66f7ee3c%2C39527%2C1731357916531.1731357916757 after 4003ms 2024-11-11T20:45:51,922 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:52,657 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:53,923 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:54,375 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 51ca66f7ee3c%2C38547%2C1731357916578.1731357954374 2024-11-11T20:45:54,384 WARN [Thread-1014 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1065 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43949 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:54,384 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-117607780_22 at /127.0.0.1:35764 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741881_1065] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data4]'}, localName='127.0.0.1:42575', datanodeUuid='7e594afa-bcac-4748-944c-21b12f1087c4', xmitsInProgress=0}:Exception transferring block BP-797088966-172.17.0.2-1731357915882:blk_1073741881_1065 to mirror 127.0.0.1:43949 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T20:45:54,385 WARN [Thread-1014 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741881_1065 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42575,DS-8a3d5d6e-fee5-49e6-b487-a147dc8006b7,DISK], DatanodeInfoWithStorage[127.0.0.1:43949,DS-f0c35d67-70b1-48b7-ad47-e78642e496e4,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43949,DS-f0c35d67-70b1-48b7-ad47-e78642e496e4,DISK]) is bad. 
2024-11-11T20:45:54,385 WARN [Thread-1014 {}] hdfs.DataStreamer(1850): Abandoning BP-797088966-172.17.0.2-1731357915882:blk_1073741881_1065 2024-11-11T20:45:54,385 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-117607780_22 at /127.0.0.1:35764 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741881_1065] {}] datanode.BlockReceiver(316): Block 1073741881 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-11T20:45:54,385 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-117607780_22 at /127.0.0.1:35764 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741881_1065] {}] datanode.DataXceiver(331): 127.0.0.1:42575:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35764 dst: /127.0.0.1:42575 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T20:45:54,385 WARN [Thread-1014 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43949,DS-f0c35d67-70b1-48b7-ad47-e78642e496e4,DISK] 2024-11-11T20:45:54,387 WARN [Thread-1014 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1066 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:54,387 WARN [Thread-1014 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741882_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45495,DS-55ef0139-4ca1-418c-82b8-09287d423f78,DISK], DatanodeInfoWithStorage[127.0.0.1:35329,DS-e3fa3e60-f541-40f6-a9fb-687b70034142,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45495,DS-55ef0139-4ca1-418c-82b8-09287d423f78,DISK]) is bad. 
2024-11-11T20:45:54,387 WARN [Thread-1014 {}] hdfs.DataStreamer(1850): Abandoning BP-797088966-172.17.0.2-1731357915882:blk_1073741882_1066 2024-11-11T20:45:54,387 WARN [Thread-1014 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45495,DS-55ef0139-4ca1-418c-82b8-09287d423f78,DISK] 2024-11-11T20:45:54,389 WARN [Thread-1014 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1067 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:54,389 WARN [Thread-1014 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741883_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK], DatanodeInfoWithStorage[127.0.0.1:42575,DS-8a3d5d6e-fee5-49e6-b487-a147dc8006b7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]) is bad. 
2024-11-11T20:45:54,389 WARN [Thread-1014 {}] hdfs.DataStreamer(1850): Abandoning BP-797088966-172.17.0.2-1731357915882:blk_1073741883_1067
2024-11-11T20:45:54,389 WARN [Thread-1014 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]
2024-11-11T20:45:54,394 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-11T20:45:54,394 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-11T20:45:54,394 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-11T20:45:54,394 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-11T20:45:54,394 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-11T20:45:54,395 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.1731357936619 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.1731357954374
2024-11-11T20:45:54,395 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36909:36909),(127.0.0.1/127.0.0.1:33083:33083)]
2024-11-11T20:45:54,395 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.1731357936619 is not closed yet, will try archiving it next time
2024-11-11T20:45:54,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35329 is added to blk_1073741863_1046 (size=13591)
2024-11-11T20:45:54,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38547 {}] regionserver.HRegion(8855): Flush requested on 1e16cb99ec81e6ac94635a2a5c2ce30d
2024-11-11T20:45:54,407 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1e16cb99ec81e6ac94635a2a5c2ce30d 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB
2024-11-11T20:45:54,413 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/.tmp/info/5fe6c14a8bba483e9b3b5a2b411ae4d5 is 1080, key is row0013/info:/1731357954397/Put/seqid=0
2024-11-11T20:45:54,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35329 is added to blk_1073741885_1069 (size=11421)
2024-11-11T20:45:54,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42575 is added to blk_1073741885_1069 (size=11421)
2024-11-11T20:45:54,420 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/.tmp/info/5fe6c14a8bba483e9b3b5a2b411ae4d5
2024-11-11T20:45:54,428 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/.tmp/info/5fe6c14a8bba483e9b3b5a2b411ae4d5 as hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/info/5fe6c14a8bba483e9b3b5a2b411ae4d5
2024-11-11T20:45:54,434 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/info/5fe6c14a8bba483e9b3b5a2b411ae4d5, entries=6, sequenceid=55, filesize=11.2 K
2024-11-11T20:45:54,436 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7530, heapSize ~8.11 KB/8304, currentSize=6.30 KB/6455 for 1e16cb99ec81e6ac94635a2a5c2ce30d in 29ms, sequenceid=55, compaction requested=true
2024-11-11T20:45:54,436 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1e16cb99ec81e6ac94635a2a5c2ce30d:
2024-11-11T20:45:54,436 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=34.6 K, sizeToCheck=16.0 K
2024-11-11T20:45:54,436 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-11T20:45:54,436 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/info/ce50be15af25431fa55cddf01b12be2c because midkey is the same as first or last row
2024-11-11T20:45:54,436 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1e16cb99ec81e6ac94635a2a5c2ce30d:info, priority=-2147483648, current under compaction store size is 1
2024-11-11T20:45:54,436 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-11T20:45:54,436 DEBUG [RS:0;51ca66f7ee3c:38547-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-11T20:45:54,438 DEBUG [RS:0;51ca66f7ee3c:38547-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35442 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-11T20:45:54,438 DEBUG [RS:0;51ca66f7ee3c:38547-shortCompactions-0 {}] regionserver.HStore(1541): 1e16cb99ec81e6ac94635a2a5c2ce30d/info is initiating minor compaction (all files)
2024-11-11T20:45:54,438 INFO [RS:0;51ca66f7ee3c:38547-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 1e16cb99ec81e6ac94635a2a5c2ce30d/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731357917948.1e16cb99ec81e6ac94635a2a5c2ce30d.
2024-11-11T20:45:54,438 INFO [RS:0;51ca66f7ee3c:38547-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/info/ce50be15af25431fa55cddf01b12be2c, hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/info/5d60da57ff8346f5b265d98e4a56ee9c, hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/info/5fe6c14a8bba483e9b3b5a2b411ae4d5] into tmpdir=hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/.tmp, totalSize=34.6 K 2024-11-11T20:45:54,439 DEBUG [RS:0;51ca66f7ee3c:38547-shortCompactions-0 {}] compactions.Compactor(225): Compacting ce50be15af25431fa55cddf01b12be2c, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731357930578 2024-11-11T20:45:54,439 DEBUG [RS:0;51ca66f7ee3c:38547-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5d60da57ff8346f5b265d98e4a56ee9c, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1731357938093 2024-11-11T20:45:54,440 DEBUG [RS:0;51ca66f7ee3c:38547-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5fe6c14a8bba483e9b3b5a2b411ae4d5, keycount=6, bloomtype=ROW, size=11.2 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731357938499 2024-11-11T20:45:54,458 INFO [RS:0;51ca66f7ee3c:38547-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1e16cb99ec81e6ac94635a2a5c2ce30d#info#compaction#24 average throughput is 8.72 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T20:45:54,459 DEBUG [RS:0;51ca66f7ee3c:38547-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/.tmp/info/a30f782c06134c14903e53f23ab08961 is 1080, key is row0002/info:/1731357930578/Put/seqid=0 2024-11-11T20:45:54,461 WARN [Thread-1030 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741886_1070 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T20:45:54,461 WARN [Thread-1030 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741886_1070 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45495,DS-55ef0139-4ca1-418c-82b8-09287d423f78,DISK], DatanodeInfoWithStorage[127.0.0.1:35329,DS-e3fa3e60-f541-40f6-a9fb-687b70034142,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45495,DS-55ef0139-4ca1-418c-82b8-09287d423f78,DISK]) is bad. 2024-11-11T20:45:54,461 WARN [Thread-1030 {}] hdfs.DataStreamer(1850): Abandoning BP-797088966-172.17.0.2-1731357915882:blk_1073741886_1070 2024-11-11T20:45:54,462 WARN [Thread-1030 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45495,DS-55ef0139-4ca1-418c-82b8-09287d423f78,DISK] 2024-11-11T20:45:54,463 WARN [Thread-1030 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1071 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:54,463 WARN [Thread-1030 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741887_1071 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK], DatanodeInfoWithStorage[127.0.0.1:43949,DS-f0c35d67-70b1-48b7-ad47-e78642e496e4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]) is bad. 2024-11-11T20:45:54,463 WARN [Thread-1030 {}] hdfs.DataStreamer(1850): Abandoning BP-797088966-172.17.0.2-1731357915882:blk_1073741887_1071 2024-11-11T20:45:54,463 WARN [Thread-1030 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK] 2024-11-11T20:45:54,465 WARN [Thread-1030 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741888_1072 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43949 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T20:45:54,465 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:35794 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741888_1072] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data4]'}, localName='127.0.0.1:42575', datanodeUuid='7e594afa-bcac-4748-944c-21b12f1087c4', xmitsInProgress=0}:Exception transferring block BP-797088966-172.17.0.2-1731357915882:blk_1073741888_1072 to mirror 127.0.0.1:43949 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T20:45:54,465 WARN [Thread-1030 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741888_1072 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42575,DS-8a3d5d6e-fee5-49e6-b487-a147dc8006b7,DISK], DatanodeInfoWithStorage[127.0.0.1:43949,DS-f0c35d67-70b1-48b7-ad47-e78642e496e4,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43949,DS-f0c35d67-70b1-48b7-ad47-e78642e496e4,DISK]) is bad. 2024-11-11T20:45:54,465 WARN [Thread-1030 {}] hdfs.DataStreamer(1850): Abandoning BP-797088966-172.17.0.2-1731357915882:blk_1073741888_1072 2024-11-11T20:45:54,465 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:35794 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741888_1072] {}] datanode.BlockReceiver(316): Block 1073741888 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-11T20:45:54,465 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:35794 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741888_1072] {}] datanode.DataXceiver(331): 127.0.0.1:42575:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35794 dst: /127.0.0.1:42575 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T20:45:54,466 WARN [Thread-1030 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43949,DS-f0c35d67-70b1-48b7-ad47-e78642e496e4,DISK] 2024-11-11T20:45:54,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42575 is added to blk_1073741889_1073 (size=23502) 2024-11-11T20:45:54,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35329 is added to blk_1073741889_1073 (size=23502) 2024-11-11T20:45:54,477 DEBUG [RS:0;51ca66f7ee3c:38547-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/.tmp/info/a30f782c06134c14903e53f23ab08961 as hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/info/a30f782c06134c14903e53f23ab08961 2024-11-11T20:45:54,486 INFO [RS:0;51ca66f7ee3c:38547-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 1e16cb99ec81e6ac94635a2a5c2ce30d/info of 1e16cb99ec81e6ac94635a2a5c2ce30d into a30f782c06134c14903e53f23ab08961(size=23.0 K), total size for store is 23.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T20:45:54,486 DEBUG [RS:0;51ca66f7ee3c:38547-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 1e16cb99ec81e6ac94635a2a5c2ce30d: 2024-11-11T20:45:54,486 INFO [RS:0;51ca66f7ee3c:38547-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731357917948.1e16cb99ec81e6ac94635a2a5c2ce30d., storeName=1e16cb99ec81e6ac94635a2a5c2ce30d/info, priority=13, startTime=1731357954436; duration=0sec 2024-11-11T20:45:54,486 DEBUG [RS:0;51ca66f7ee3c:38547-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-11-11T20:45:54,487 DEBUG [RS:0;51ca66f7ee3c:38547-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T20:45:54,487 DEBUG [RS:0;51ca66f7ee3c:38547-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/info/a30f782c06134c14903e53f23ab08961 because midkey is the same as first or last row 2024-11-11T20:45:54,487 DEBUG [RS:0;51ca66f7ee3c:38547-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-11-11T20:45:54,487 DEBUG [RS:0;51ca66f7ee3c:38547-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T20:45:54,487 DEBUG [RS:0;51ca66f7ee3c:38547-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/info/a30f782c06134c14903e53f23ab08961 because midkey is the same as first or last row 2024-11-11T20:45:54,487 DEBUG [RS:0;51ca66f7ee3c:38547-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-11-11T20:45:54,487 DEBUG [RS:0;51ca66f7ee3c:38547-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T20:45:54,487 DEBUG [RS:0;51ca66f7ee3c:38547-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/info/a30f782c06134c14903e53f23ab08961 because midkey is the same as first or last row 2024-11-11T20:45:54,487 DEBUG [RS:0;51ca66f7ee3c:38547-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T20:45:54,487 DEBUG [RS:0;51ca66f7ee3c:38547-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1e16cb99ec81e6ac94635a2a5c2ce30d:info 2024-11-11T20:45:54,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38547 {}] regionserver.HRegion(8855): Flush requested on 1e16cb99ec81e6ac94635a2a5c2ce30d 2024-11-11T20:45:54,626 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1e16cb99ec81e6ac94635a2a5c2ce30d 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-11T20:45:54,631 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/.tmp/info/a309b4d820144b879b64685d2e20ca5f is 1080, key is row0018/info:/1731357954409/Put/seqid=0 2024-11-11T20:45:54,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35329 is added to blk_1073741890_1074 (size=11421) 2024-11-11T20:45:54,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42575 is added to blk_1073741890_1074 (size=11421) 2024-11-11T20:45:54,636 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/.tmp/info/a309b4d820144b879b64685d2e20ca5f 2024-11-11T20:45:54,642 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/.tmp/info/a309b4d820144b879b64685d2e20ca5f as hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/info/a309b4d820144b879b64685d2e20ca5f 2024-11-11T20:45:54,649 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/info/a309b4d820144b879b64685d2e20ca5f, entries=6, sequenceid=66, filesize=11.2 K 2024-11-11T20:45:54,650 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7530, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 1e16cb99ec81e6ac94635a2a5c2ce30d in 24ms, sequenceid=66, compaction requested=false 2024-11-11T20:45:54,650 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1e16cb99ec81e6ac94635a2a5c2ce30d: 2024-11-11T20:45:54,650 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=34.1 K, sizeToCheck=16.0 K 2024-11-11T20:45:54,651 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T20:45:54,651 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/info/a30f782c06134c14903e53f23ab08961 because midkey is the same as first or last row 2024-11-11T20:45:54,658 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:54,658 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-11-11T20:45:54,799 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.1731357936619 to hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/oldWALs/51ca66f7ee3c%2C38547%2C1731357916578.1731357936619 2024-11-11T20:45:54,827 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-11T20:45:54,827 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-11T20:45:54,827 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at 
org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T20:45:54,827 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T20:45:54,827 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T20:45:54,827 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-11T20:45:54,828 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-11T20:45:54,828 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1048628396, stopped=false 2024-11-11T20:45:54,828 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=51ca66f7ee3c,39527,1731357916531 2024-11-11T20:45:54,829 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39527-0x100308899d10000, quorum=127.0.0.1:57310, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T20:45:54,829 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38547-0x100308899d10001, quorum=127.0.0.1:57310, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T20:45:54,829 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46139-0x100308899d10002, quorum=127.0.0.1:57310, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T20:45:54,829 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39527-0x100308899d10000, quorum=127.0.0.1:57310, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:45:54,829 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38547-0x100308899d10001, quorum=127.0.0.1:57310, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:45:54,829 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46139-0x100308899d10002, quorum=127.0.0.1:57310, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:45:54,829 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T20:45:54,829 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-11T20:45:54,829 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T20:45:54,830 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T20:45:54,830 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '51ca66f7ee3c,38547,1731357916578' ***** 2024-11-11T20:45:54,830 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39527-0x100308899d10000, quorum=127.0.0.1:57310, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T20:45:54,830 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-11T20:45:54,830 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38547-0x100308899d10001, quorum=127.0.0.1:57310, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T20:45:54,830 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '51ca66f7ee3c,46139,1731357917854' ***** 2024-11-11T20:45:54,830 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-11T20:45:54,830 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46139-0x100308899d10002, quorum=127.0.0.1:57310, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T20:45:54,830 INFO [RS:0;51ca66f7ee3c:38547 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T20:45:54,830 INFO [RS:1;51ca66f7ee3c:46139 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T20:45:54,830 INFO [RS:0;51ca66f7ee3c:38547 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-11T20:45:54,830 INFO [RS:1;51ca66f7ee3c:46139 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-11T20:45:54,830 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-11T20:45:54,830 INFO [RS:0;51ca66f7ee3c:38547 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-11T20:45:54,830 INFO [RS:1;51ca66f7ee3c:46139 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-11T20:45:54,830 INFO [RS:1;51ca66f7ee3c:46139 {}] regionserver.HRegionServer(959): stopping server 51ca66f7ee3c,46139,1731357917854 2024-11-11T20:45:54,830 INFO [RS:1;51ca66f7ee3c:46139 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T20:45:54,830 INFO [RS:0;51ca66f7ee3c:38547 {}] regionserver.HRegionServer(3091): Received CLOSE for 1e16cb99ec81e6ac94635a2a5c2ce30d 2024-11-11T20:45:54,830 INFO [RS:1;51ca66f7ee3c:46139 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;51ca66f7ee3c:46139. 
2024-11-11T20:45:54,830 DEBUG [RS:1;51ca66f7ee3c:46139 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T20:45:54,831 DEBUG [RS:1;51ca66f7ee3c:46139 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T20:45:54,831 INFO [RS:0;51ca66f7ee3c:38547 {}] regionserver.HRegionServer(959): stopping server 51ca66f7ee3c,38547,1731357916578 2024-11-11T20:45:54,831 INFO [RS:1;51ca66f7ee3c:46139 {}] regionserver.HRegionServer(976): stopping server 51ca66f7ee3c,46139,1731357917854; all regions closed. 2024-11-11T20:45:54,831 INFO [RS:0;51ca66f7ee3c:38547 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T20:45:54,831 INFO [RS:0;51ca66f7ee3c:38547 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;51ca66f7ee3c:38547. 
2024-11-11T20:45:54,831 DEBUG [RS:0;51ca66f7ee3c:38547 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T20:45:54,831 DEBUG [RS:0;51ca66f7ee3c:38547 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T20:45:54,831 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 1e16cb99ec81e6ac94635a2a5c2ce30d, disabling compactions & flushes 2024-11-11T20:45:54,831 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-11T20:45:54,831 INFO [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731357917948.1e16cb99ec81e6ac94635a2a5c2ce30d. 2024-11-11T20:45:54,831 INFO [RS:0;51ca66f7ee3c:38547 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-11T20:45:54,831 INFO [RS:0;51ca66f7ee3c:38547 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-11T20:45:54,831 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731357917948.1e16cb99ec81e6ac94635a2a5c2ce30d. 2024-11-11T20:45:54,831 INFO [RS:0;51ca66f7ee3c:38547 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-11T20:45:54,831 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731357917948.1e16cb99ec81e6ac94635a2a5c2ce30d. after waiting 0 ms 2024-11-11T20:45:54,831 INFO [RS:0;51ca66f7ee3c:38547 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-11T20:45:54,831 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731357917948.1e16cb99ec81e6ac94635a2a5c2ce30d. 
2024-11-11T20:45:54,831 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:54,831 INFO [RS:0;51ca66f7ee3c:38547 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-11T20:45:54,831 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:54,831 DEBUG [RS:0;51ca66f7ee3c:38547 {}] regionserver.HRegionServer(1325): Online Regions={1e16cb99ec81e6ac94635a2a5c2ce30d=TestLogRolling-testLogRollOnDatanodeDeath,,1731357917948.1e16cb99ec81e6ac94635a2a5c2ce30d., 1588230740=hbase:meta,,1.1588230740} 2024-11-11T20:45:54,832 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:54,832 DEBUG [RS:0;51ca66f7ee3c:38547 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 1e16cb99ec81e6ac94635a2a5c2ce30d 2024-11-11T20:45:54,832 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T20:45:54,832 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:54,832 INFO [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T20:45:54,832 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T20:45:54,832 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:54,832 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T20:45:54,832 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T20:45:54,832 INFO [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-11T20:45:54,832 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731357917948.1e16cb99ec81e6ac94635a2a5c2ce30d.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/info/5b8589a3d1cd4a1c92b144ef914406c2, hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/info/d7364ba7f42d46dc88a2916caea5fd18, hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/info/ce50be15af25431fa55cddf01b12be2c, hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/info/cdae654016a64e7697352c24186a042b, hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/info/5d60da57ff8346f5b265d98e4a56ee9c, hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/info/5fe6c14a8bba483e9b3b5a2b411ae4d5] to archive 
2024-11-11T20:45:54,832 ERROR [FSHLog-0-hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411-prefix:51ca66f7ee3c,38547,1731357916578.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:54,832 WARN [FSHLog-0-hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411-prefix:51ca66f7ee3c,38547,1731357916578.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:54,833 DEBUG [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 51ca66f7ee3c%2C38547%2C1731357916578.meta:.meta(num 1731357917676) roll requested 2024-11-11T20:45:54,833 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:54,833 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:54,833 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 2024-11-11T20:45:54,833 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357954833.meta 2024-11-11T20:45:54,833 WARN [IPC Server handler 3 on default port 46503 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 has not been closed. Lease recovery is in progress. RecoveryId = 1075 for block blk_1073741837_1013 2024-11-11T20:45:54,834 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731357917948.1e16cb99ec81e6ac94635a2a5c2ce30d.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-11T20:45:54,834 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 after 1ms 2024-11-11T20:45:54,835 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731357917948.1e16cb99ec81e6ac94635a2a5c2ce30d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/info/5b8589a3d1cd4a1c92b144ef914406c2 to hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/info/5b8589a3d1cd4a1c92b144ef914406c2 2024-11-11T20:45:54,835 WARN [Thread-1043 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741891_1076 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T20:45:54,835 WARN [Thread-1043 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741891_1076 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45495,DS-55ef0139-4ca1-418c-82b8-09287d423f78,DISK], DatanodeInfoWithStorage[127.0.0.1:43949,DS-f0c35d67-70b1-48b7-ad47-e78642e496e4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45495,DS-55ef0139-4ca1-418c-82b8-09287d423f78,DISK]) is bad. 2024-11-11T20:45:54,836 WARN [Thread-1043 {}] hdfs.DataStreamer(1850): Abandoning BP-797088966-172.17.0.2-1731357915882:blk_1073741891_1076 2024-11-11T20:45:54,836 WARN [Thread-1043 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45495,DS-55ef0139-4ca1-418c-82b8-09287d423f78,DISK] 2024-11-11T20:45:54,837 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731357917948.1e16cb99ec81e6ac94635a2a5c2ce30d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/info/d7364ba7f42d46dc88a2916caea5fd18 to hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/info/d7364ba7f42d46dc88a2916caea5fd18 2024-11-11T20:45:54,839 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731357917948.1e16cb99ec81e6ac94635a2a5c2ce30d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/info/ce50be15af25431fa55cddf01b12be2c to hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/info/ce50be15af25431fa55cddf01b12be2c 2024-11-11T20:45:54,840 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731357917948.1e16cb99ec81e6ac94635a2a5c2ce30d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/info/cdae654016a64e7697352c24186a042b to hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/info/cdae654016a64e7697352c24186a042b 2024-11-11T20:45:54,841 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731357917948.1e16cb99ec81e6ac94635a2a5c2ce30d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/info/5d60da57ff8346f5b265d98e4a56ee9c to hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/info/5d60da57ff8346f5b265d98e4a56ee9c 2024-11-11T20:45:54,843 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731357917948.1e16cb99ec81e6ac94635a2a5c2ce30d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/info/5fe6c14a8bba483e9b3b5a2b411ae4d5 to hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/info/5fe6c14a8bba483e9b3b5a2b411ae4d5 2024-11-11T20:45:54,843 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731357917948.1e16cb99ec81e6ac94635a2a5c2ce30d.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=51ca66f7ee3c:39527 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-11-11T20:45:54,843 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731357917948.1e16cb99ec81e6ac94635a2a5c2ce30d.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [5b8589a3d1cd4a1c92b144ef914406c2=10347, d7364ba7f42d46dc88a2916caea5fd18=12506, ce50be15af25431fa55cddf01b12be2c=17994, cdae654016a64e7697352c24186a042b=6027, 5d60da57ff8346f5b265d98e4a56ee9c=6027, 5fe6c14a8bba483e9b3b5a2b411ae4d5=11421] 2024-11-11T20:45:54,846 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:54,846 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:54,846 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:54,846 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:54,846 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:54,846 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357954833.meta 2024-11-11T20:45:54,847 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:54,847 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:54,847 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta 2024-11-11T20:45:54,847 DEBUG [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33083:33083),(127.0.0.1/127.0.0.1:36909:36909)] 2024-11-11T20:45:54,847 DEBUG [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta is not closed yet, will try archiving it next time 2024-11-11T20:45:54,848 WARN [IPC Server handler 4 on default port 46503 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta has not been closed. Lease recovery is in progress. RecoveryId = 1078 for block blk_1073741834_1010 2024-11-11T20:45:54,848 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta after 1ms 2024-11-11T20:45:54,854 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1e16cb99ec81e6ac94635a2a5c2ce30d/recovered.edits/69.seqid, newMaxSeqId=69, maxSeqId=1 2024-11-11T20:45:54,854 INFO [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731357917948.1e16cb99ec81e6ac94635a2a5c2ce30d. 2024-11-11T20:45:54,854 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 1e16cb99ec81e6ac94635a2a5c2ce30d: Waiting for close lock at 1731357954831Running coprocessor pre-close hooks at 1731357954831Disabling compacts and flushes for region at 1731357954831Disabling writes for close at 1731357954831Writing region close event to WAL at 1731357954845 (+14 ms)Running coprocessor post-close hooks at 1731357954854 (+9 ms)Closed at 1731357954854 2024-11-11T20:45:54,854 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731357917948.1e16cb99ec81e6ac94635a2a5c2ce30d. 
2024-11-11T20:45:54,865 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/hbase/meta/1588230740/.tmp/info/c1e79d4f48d14090b469df8053e634a3 is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1731357917948.1e16cb99ec81e6ac94635a2a5c2ce30d./info:regioninfo/1731357918326/Put/seqid=0 2024-11-11T20:45:54,867 WARN [Thread-1050 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741893_1079 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:54,867 WARN [Thread-1050 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741893_1079 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45495,DS-55ef0139-4ca1-418c-82b8-09287d423f78,DISK], DatanodeInfoWithStorage[127.0.0.1:35329,DS-e3fa3e60-f541-40f6-a9fb-687b70034142,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45495,DS-55ef0139-4ca1-418c-82b8-09287d423f78,DISK]) is bad. 2024-11-11T20:45:54,867 WARN [Thread-1050 {}] hdfs.DataStreamer(1850): Abandoning BP-797088966-172.17.0.2-1731357915882:blk_1073741893_1079 2024-11-11T20:45:54,867 WARN [Thread-1050 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45495,DS-55ef0139-4ca1-418c-82b8-09287d423f78,DISK] 2024-11-11T20:45:54,868 WARN [Thread-1050 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741894_1080 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T20:45:54,868 WARN [Thread-1050 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741894_1080 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK], DatanodeInfoWithStorage[127.0.0.1:43949,DS-f0c35d67-70b1-48b7-ad47-e78642e496e4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]) is bad. 2024-11-11T20:45:54,868 WARN [Thread-1050 {}] hdfs.DataStreamer(1850): Abandoning BP-797088966-172.17.0.2-1731357915882:blk_1073741894_1080 2024-11-11T20:45:54,869 WARN [Thread-1050 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK] 2024-11-11T20:45:54,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35329 is added to blk_1073741895_1081 (size=7089) 2024-11-11T20:45:54,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42575 is added to blk_1073741895_1081 (size=7089) 2024-11-11T20:45:54,874 INFO [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/hbase/meta/1588230740/.tmp/info/c1e79d4f48d14090b469df8053e634a3 2024-11-11T20:45:54,893 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/hbase/meta/1588230740/.tmp/ns/09930d6246684c6db102c88aa4a28a81 is 43, key is default/ns:d/1731357917731/Put/seqid=0 2024-11-11T20:45:54,896 WARN [Thread-1056 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741896_1082 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33779 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T20:45:54,896 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:47654 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741896_1082] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data10]'}, localName='127.0.0.1:35329', datanodeUuid='c426f781-4da7-4c04-9fda-4773aa7ad7fa', xmitsInProgress=0}:Exception transferring block BP-797088966-172.17.0.2-1731357915882:blk_1073741896_1082 to mirror 127.0.0.1:33779 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T20:45:54,896 WARN [Thread-1056 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741896_1082 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35329,DS-e3fa3e60-f541-40f6-a9fb-687b70034142,DISK], DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]) is bad. 2024-11-11T20:45:54,896 WARN [Thread-1056 {}] hdfs.DataStreamer(1850): Abandoning BP-797088966-172.17.0.2-1731357915882:blk_1073741896_1082 2024-11-11T20:45:54,896 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:47654 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741896_1082] {}] datanode.BlockReceiver(316): Block 1073741896 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-11T20:45:54,896 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:47654 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741896_1082] {}] datanode.DataXceiver(331): 127.0.0.1:35329:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47654 dst: /127.0.0.1:35329 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T20:45:54,896 WARN [Thread-1056 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK] 2024-11-11T20:45:54,898 WARN [Thread-1056 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741897_1083 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45495 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:54,899 WARN [Thread-1056 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741897_1083 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35329,DS-e3fa3e60-f541-40f6-a9fb-687b70034142,DISK], DatanodeInfoWithStorage[127.0.0.1:45495,DS-55ef0139-4ca1-418c-82b8-09287d423f78,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45495,DS-55ef0139-4ca1-418c-82b8-09287d423f78,DISK]) is bad. 2024-11-11T20:45:54,898 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:47662 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741897_1083] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data10]'}, localName='127.0.0.1:35329', datanodeUuid='c426f781-4da7-4c04-9fda-4773aa7ad7fa', xmitsInProgress=0}:Exception transferring block BP-797088966-172.17.0.2-1731357915882:blk_1073741897_1083 to mirror 127.0.0.1:45495 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T20:45:54,899 WARN [Thread-1056 {}] hdfs.DataStreamer(1850): Abandoning BP-797088966-172.17.0.2-1731357915882:blk_1073741897_1083 2024-11-11T20:45:54,899 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:47662 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741897_1083] {}] datanode.BlockReceiver(316): Block 1073741897 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-11T20:45:54,899 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:47662 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741897_1083] {}] datanode.DataXceiver(331): 127.0.0.1:35329:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47662 dst: /127.0.0.1:35329 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T20:45:54,899 WARN [Thread-1056 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45495,DS-55ef0139-4ca1-418c-82b8-09287d423f78,DISK] 2024-11-11T20:45:54,902 WARN [Thread-1056 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741898_1084 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43949 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
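In the Thread-1056 round above the client does reach the first datanode (127.0.0.1:35329); it is that datanode's onward connection to its mirror that is refused, so the pipeline ack comes back with status ERROR and firstBadLink naming the mirror, and the client marks datanode 1 rather than datanode 0 as bad. The sketch below is a hypothetical model of that decision; the Ack record is an illustration, not the real data-transfer protobuf.

import java.util.List;

// Hypothetical model of "ack with firstBadLink" handling: the address reported by
// the first datanode decides which pipeline member gets blamed and excluded next.
public class FirstBadLinkSketch {
    record Ack(boolean error, String firstBadLink) {}               // stand-in for the real ack message

    /** Index of the datanode to treat as bad, or -1 if the ack reported success. */
    static int badNodeIndex(Ack ack, List<String> pipeline) {
        if (!ack.error()) {
            return -1;
        }
        if (ack.firstBadLink() != null && !ack.firstBadLink().isEmpty()) {
            return pipeline.indexOf(ack.firstBadLink());            // blame the unreachable mirror
        }
        return 0;                                                   // otherwise blame the node we spoke to
    }

    public static void main(String[] args) {
        List<String> pipeline = List.of("127.0.0.1:35329", "127.0.0.1:33779");
        Ack ack = new Ack(true, "127.0.0.1:33779");                 // values taken from the log above
        System.out.println("datanode " + badNodeIndex(ack, pipeline) + " is bad");   // datanode 1 is bad
    }
}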
2024-11-11T20:45:54,902 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:47674 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741898_1084] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data10]'}, localName='127.0.0.1:35329', datanodeUuid='c426f781-4da7-4c04-9fda-4773aa7ad7fa', xmitsInProgress=0}:Exception transferring block BP-797088966-172.17.0.2-1731357915882:blk_1073741898_1084 to mirror 127.0.0.1:43949 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T20:45:54,902 WARN [Thread-1056 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741898_1084 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35329,DS-e3fa3e60-f541-40f6-a9fb-687b70034142,DISK], DatanodeInfoWithStorage[127.0.0.1:43949,DS-f0c35d67-70b1-48b7-ad47-e78642e496e4,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43949,DS-f0c35d67-70b1-48b7-ad47-e78642e496e4,DISK]) is bad. 2024-11-11T20:45:54,902 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:47674 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741898_1084] {}] datanode.BlockReceiver(316): Block 1073741898 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-11T20:45:54,902 WARN [Thread-1056 {}] hdfs.DataStreamer(1850): Abandoning BP-797088966-172.17.0.2-1731357915882:blk_1073741898_1084 2024-11-11T20:45:54,902 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:47674 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741898_1084] {}] datanode.DataXceiver(331): 127.0.0.1:35329:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47674 dst: /127.0.0.1:35329 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T20:45:54,902 WARN [Thread-1056 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43949,DS-f0c35d67-70b1-48b7-ad47-e78642e496e4,DISK] 2024-11-11T20:45:54,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35329 is added to blk_1073741899_1085 (size=5153) 2024-11-11T20:45:54,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42575 is added to blk_1073741899_1085 (size=5153) 2024-11-11T20:45:54,908 INFO [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/hbase/meta/1588230740/.tmp/ns/09930d6246684c6db102c88aa4a28a81 2024-11-11T20:45:54,928 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/hbase/meta/1588230740/.tmp/table/e22ae3c423954982a99507616b469a04 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1731357918337/Put/seqid=0 2024-11-11T20:45:54,931 WARN [Thread-1066 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741900_1086 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33779 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
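The "addStoredBlock" lines above show each flush file being reported by only the two datanodes that are still reachable, 127.0.0.1:35329 and 127.0.0.1:42575. As a hedged aside, the plain Hadoop FileSystem API sketched below could be used to list which hosts actually hold a file's blocks; the NameNode URI and the ns file path are copied from the log (after the file has been committed out of .tmp), and this check is not part of the test itself.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch: list the datanodes reporting replicas of a flush file, mirroring the
// "addStoredBlock" messages above. Assumes the mini-cluster NameNode from the log
// is still running; adjust fs.defaultFS and the path for any other setup.
public class ReplicaLocationsSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:46503");
        try (FileSystem fs = FileSystem.get(conf)) {
            Path flushFile = new Path("/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/"
                    + "data/hbase/meta/1588230740/ns/09930d6246684c6db102c88aa4a28a81");
            FileStatus status = fs.getFileStatus(flushFile);
            for (BlockLocation block : fs.getFileBlockLocations(status, 0, status.getLen())) {
                System.out.println("block at offset " + block.getOffset()
                        + " held by " + String.join(", ", block.getHosts()));
            }
        }
    }
}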
2024-11-11T20:45:54,931 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:47698 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741900_1086] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data10]'}, localName='127.0.0.1:35329', datanodeUuid='c426f781-4da7-4c04-9fda-4773aa7ad7fa', xmitsInProgress=0}:Exception transferring block BP-797088966-172.17.0.2-1731357915882:blk_1073741900_1086 to mirror 127.0.0.1:33779 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T20:45:54,931 WARN [Thread-1066 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741900_1086 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35329,DS-e3fa3e60-f541-40f6-a9fb-687b70034142,DISK], DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK]) is bad. 2024-11-11T20:45:54,931 WARN [Thread-1066 {}] hdfs.DataStreamer(1850): Abandoning BP-797088966-172.17.0.2-1731357915882:blk_1073741900_1086 2024-11-11T20:45:54,931 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:47698 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741900_1086] {}] datanode.BlockReceiver(316): Block 1073741900 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-11T20:45:54,931 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_230479249_22 at /127.0.0.1:47698 [Receiving block BP-797088966-172.17.0.2-1731357915882:blk_1073741900_1086] {}] datanode.DataXceiver(331): 127.0.0.1:35329:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47698 dst: /127.0.0.1:35329 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T20:45:54,931 WARN [Thread-1066 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33779,DS-8151f419-187c-4be4-a5c2-9bd15b59a9ec,DISK] 2024-11-11T20:45:54,932 WARN [Thread-1066 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741901_1087 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:54,933 WARN [Thread-1066 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741901_1087 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45495,DS-55ef0139-4ca1-418c-82b8-09287d423f78,DISK], DatanodeInfoWithStorage[127.0.0.1:43949,DS-f0c35d67-70b1-48b7-ad47-e78642e496e4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45495,DS-55ef0139-4ca1-418c-82b8-09287d423f78,DISK]) is bad. 2024-11-11T20:45:54,933 WARN [Thread-1066 {}] hdfs.DataStreamer(1850): Abandoning BP-797088966-172.17.0.2-1731357915882:blk_1073741901_1087 2024-11-11T20:45:54,933 WARN [Thread-1066 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45495,DS-55ef0139-4ca1-418c-82b8-09287d423f78,DISK] 2024-11-11T20:45:54,934 WARN [Thread-1066 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741902_1088 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:45:54,934 WARN [Thread-1066 {}] hdfs.DataStreamer(1731): Error Recovery for BP-797088966-172.17.0.2-1731357915882:blk_1073741902_1088 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43949,DS-f0c35d67-70b1-48b7-ad47-e78642e496e4,DISK], DatanodeInfoWithStorage[127.0.0.1:35329,DS-e3fa3e60-f541-40f6-a9fb-687b70034142,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43949,DS-f0c35d67-70b1-48b7-ad47-e78642e496e4,DISK]) is bad. 2024-11-11T20:45:54,934 WARN [Thread-1066 {}] hdfs.DataStreamer(1850): Abandoning BP-797088966-172.17.0.2-1731357915882:blk_1073741902_1088 2024-11-11T20:45:54,935 WARN [Thread-1066 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43949,DS-f0c35d67-70b1-48b7-ad47-e78642e496e4,DISK] 2024-11-11T20:45:54,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35329 is added to blk_1073741903_1089 (size=5424) 2024-11-11T20:45:54,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42575 is added to blk_1073741903_1089 (size=5424) 2024-11-11T20:45:54,939 INFO [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/hbase/meta/1588230740/.tmp/table/e22ae3c423954982a99507616b469a04 2024-11-11T20:45:54,946 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/hbase/meta/1588230740/.tmp/info/c1e79d4f48d14090b469df8053e634a3 as hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/hbase/meta/1588230740/info/c1e79d4f48d14090b469df8053e634a3 2024-11-11T20:45:54,952 INFO [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/hbase/meta/1588230740/info/c1e79d4f48d14090b469df8053e634a3, entries=10, sequenceid=11, filesize=6.9 K 2024-11-11T20:45:54,954 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/hbase/meta/1588230740/.tmp/ns/09930d6246684c6db102c88aa4a28a81 as hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/hbase/meta/1588230740/ns/09930d6246684c6db102c88aa4a28a81 2024-11-11T20:45:54,959 INFO [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/hbase/meta/1588230740/ns/09930d6246684c6db102c88aa4a28a81, entries=2, sequenceid=11, filesize=5.0 K 2024-11-11T20:45:54,960 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/hbase/meta/1588230740/.tmp/table/e22ae3c423954982a99507616b469a04 as 
hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/hbase/meta/1588230740/table/e22ae3c423954982a99507616b469a04 2024-11-11T20:45:54,966 INFO [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/hbase/meta/1588230740/table/e22ae3c423954982a99507616b469a04, entries=2, sequenceid=11, filesize=5.3 K 2024-11-11T20:45:54,967 INFO [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 135ms, sequenceid=11, compaction requested=false 2024-11-11T20:45:54,972 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-11T20:45:54,973 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T20:45:54,973 INFO [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T20:45:54,973 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731357954831Running coprocessor pre-close hooks at 1731357954832 (+1 ms)Disabling compacts and flushes for region at 1731357954832Disabling writes for close at 1731357954832Obtaining lock to block concurrent updates at 1731357954832Preparing flush snapshotting stores in 1588230740 at 1731357954832Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1731357954832Flushing stores of hbase:meta,,1.1588230740 at 1731357954848 (+16 ms)Flushing 1588230740/info: creating writer at 1731357954848Flushing 1588230740/info: appending metadata at 1731357954864 (+16 ms)Flushing 1588230740/info: closing flushed file at 1731357954864Flushing 1588230740/ns: creating writer at 1731357954879 (+15 ms)Flushing 1588230740/ns: appending metadata at 1731357954892 (+13 ms)Flushing 1588230740/ns: closing flushed file at 1731357954892Flushing 1588230740/table: creating writer at 1731357954913 (+21 ms)Flushing 1588230740/table: appending metadata at 1731357954928 (+15 ms)Flushing 1588230740/table: closing flushed file at 1731357954928Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6618c2fb: reopening flushed file at 1731357954945 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5a9e477c: reopening flushed file at 1731357954953 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3bc30d24: reopening flushed file at 1731357954960 (+7 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 135ms, sequenceid=11, compaction requested=false at 1731357954967 (+7 ms)Writing region close event to WAL at 1731357954969 (+2 ms)Running coprocessor post-close hooks at 1731357954973 (+4 ms)Closed at 1731357954973 2024-11-11T20:45:54,973 DEBUG 
[RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-11T20:45:54,974 INFO [regionserver/51ca66f7ee3c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T20:45:55,009 INFO [regionserver/51ca66f7ee3c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-11T20:45:55,009 INFO [regionserver/51ca66f7ee3c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-11T20:45:55,014 INFO [regionserver/51ca66f7ee3c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-11T20:45:55,014 INFO [regionserver/51ca66f7ee3c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-11T20:45:55,032 INFO [RS:0;51ca66f7ee3c:38547 {}] regionserver.HRegionServer(976): stopping server 51ca66f7ee3c,38547,1731357916578; all regions closed. 2024-11-11T20:45:55,032 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:55,032 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:55,033 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:55,033 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:55,033 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:45:55,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35329 is added to blk_1073741892_1077 (size=825) 2024-11-11T20:45:55,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42575 is added to blk_1073741892_1077 (size=825) 2024-11-11T20:45:55,499 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@56b48c83[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35329, datanodeUuid=c426f781-4da7-4c04-9fda-4773aa7ad7fa, infoPort=36909, infoSecurePort=0, ipcPort=46487, storageInfo=lv=-57;cid=testClusterID;nsid=502823967;c=1731357915882):Failed to transfer BP-797088966-172.17.0.2-1731357915882:blk_1073741863_1046 to 127.0.0.1:43949 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
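The HRegionFileSystem "Committing .tmp/... as ..." lines followed by "Added ..., entries=..., filesize=..." above show the flush writing each HFile under the region's .tmp directory and then moving it into the column family directory, so readers only ever observe complete files. A simplified write-then-rename commit using the plain FileSystem API is sketched below; it illustrates the pattern and is not HBase's actual HRegionFileSystem code.

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Simplified write-then-rename commit, mirroring the "Committing .tmp/... as ..."
// lines above: the file only becomes visible under the family directory once it
// has been fully written and renamed.
public class FlushCommitSketch {
    static Path commitFlushFile(FileSystem fs, Path regionDir, String family, Path tmpFile)
            throws IOException {
        Path familyDir = new Path(regionDir, family);
        if (!fs.exists(familyDir) && !fs.mkdirs(familyDir)) {
            throw new IOException("could not create " + familyDir);
        }
        Path committed = new Path(familyDir, tmpFile.getName());
        // rename is the commit point: before it, readers never see the half-written file
        if (!fs.rename(tmpFile, committed)) {
            throw new IOException("failed to commit " + tmpFile + " as " + committed);
        }
        return committed;
    }
}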
2024-11-11T20:45:55,918 INFO [regionserver/51ca66f7ee3c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T20:45:56,830 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-11T20:45:56,832 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T20:45:56,832 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-11T20:45:58,639 INFO [master/51ca66f7ee3c:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-11T20:45:58,639 INFO [master/51ca66f7ee3c:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-11T20:45:58,835 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 after 4002ms 2024-11-11T20:45:58,850 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta after 4003ms 2024-11-11T20:45:58,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35329 is added to blk_1073741836_1012 (size=76) 2024-11-11T20:45:58,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35329 is added to blk_1073741828_1004 (size=1189) 2024-11-11T20:45:58,926 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@3a63bc4b {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-797088966-172.17.0.2-1731357915882:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:33779,null,null]) java.net.ConnectException: Call From 51ca66f7ee3c/172.17.0.2 to localhost:37701 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more 2024-11-11T20:45:59,833 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-11T20:45:59,839 DEBUG [RS:1;51ca66f7ee3c:46139 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/oldWALs 2024-11-11T20:45:59,839 INFO [RS:1;51ca66f7ee3c:46139 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 51ca66f7ee3c%2C46139%2C1731357917854:(num 1731357918051) 2024-11-11T20:45:59,839 DEBUG [RS:1;51ca66f7ee3c:46139 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T20:45:59,839 INFO [RS:1;51ca66f7ee3c:46139 {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T20:45:59,840 INFO [RS:1;51ca66f7ee3c:46139 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T20:45:59,840 INFO [RS:1;51ca66f7ee3c:46139 {}] hbase.ChoreService(370): Chore service for: regionserver/51ca66f7ee3c:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-11T20:45:59,841 INFO [RS:1;51ca66f7ee3c:46139 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-11T20:45:59,841 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-11T20:45:59,841 INFO [RS:1;51ca66f7ee3c:46139 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-11T20:45:59,841 INFO [RS:1;51ca66f7ee3c:46139 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-11T20:45:59,841 INFO [RS:1;51ca66f7ee3c:46139 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T20:45:59,841 INFO [RS:1;51ca66f7ee3c:46139 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46139 2024-11-11T20:45:59,843 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46139-0x100308899d10002, quorum=127.0.0.1:57310, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/51ca66f7ee3c,46139,1731357917854 2024-11-11T20:45:59,843 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39527-0x100308899d10000, quorum=127.0.0.1:57310, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T20:45:59,843 INFO [RS:1;51ca66f7ee3c:46139 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T20:45:59,844 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [51ca66f7ee3c,46139,1731357917854] 2024-11-11T20:45:59,845 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/51ca66f7ee3c,46139,1731357917854 already deleted, retry=false 2024-11-11T20:45:59,845 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 51ca66f7ee3c,46139,1731357917854 expired; onlineServers=1 2024-11-11T20:45:59,855 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:45:59,880 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:45:59,880 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:45:59,881 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:45:59,881 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:45:59,882 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:45:59,891 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:45:59,891 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:45:59,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35329 is added to blk_1073741835_1011 (size=393) 2024-11-11T20:45:59,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35329 is added to blk_1073741826_1002 (size=42) 2024-11-11T20:45:59,944 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46139-0x100308899d10002, quorum=127.0.0.1:57310, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T20:45:59,944 INFO [RS:1;51ca66f7ee3c:46139 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T20:45:59,944 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46139-0x100308899d10002, quorum=127.0.0.1:57310, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T20:45:59,944 INFO [RS:1;51ca66f7ee3c:46139 {}] regionserver.HRegionServer(1031): Exiting; stopping=51ca66f7ee3c,46139,1731357917854; zookeeper connection closed. 
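The RecoverLeaseFSUtils messages further up ("Failed to recover lease, attempt=1 on file=... after 4002ms") and the repeated WAL-shutdown warnings show the close path asking the NameNode to recover the lease on WAL files whose writer pipeline died with the stopped datanodes, retrying until the files can be closed. A hedged sketch of such a retry loop against the public DistributedFileSystem.recoverLease API follows; the deadline and sleep values are arbitrary and this is not the HBase utility itself.

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Sketch of a lease-recovery retry loop, in the spirit of the
// "Failed to recover lease, attempt=N ... after NNNNms" messages above.
// Deadline and sleep values are illustrative, not HBase's defaults.
public class RecoverLeaseSketch {
    static boolean recoverLeaseWithRetries(DistributedFileSystem dfs, Path walFile, long deadlineMs)
            throws IOException, InterruptedException {
        long start = System.currentTimeMillis();
        int attempt = 0;
        while (System.currentTimeMillis() - start < deadlineMs) {
            attempt++;
            // recoverLease returns true once the NameNode has closed the file
            if (dfs.recoverLease(walFile)) {
                return true;
            }
            System.out.println("Failed to recover lease, attempt=" + attempt + " on file=" + walFile
                    + " after " + (System.currentTimeMillis() - start) + "ms");
            Thread.sleep(1000L);                                    // arbitrary pause between attempts
        }
        return false;
    }
}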
2024-11-11T20:45:59,945 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4655b128 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4655b128 2024-11-11T20:46:00,033 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-11T20:46:00,042 DEBUG [RS:0;51ca66f7ee3c:38547 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/oldWALs 2024-11-11T20:46:00,043 INFO [RS:0;51ca66f7ee3c:38547 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 51ca66f7ee3c%2C38547%2C1731357916578.meta:.meta(num 1731357954833) 2024-11-11T20:46:00,044 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:00,044 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:00,044 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:00,044 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:00,045 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:00,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42575 is added to blk_1073741884_1068 (size=16308) 2024-11-11T20:46:00,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35329 is added to blk_1073741884_1068 (size=16308) 2024-11-11T20:46:00,049 DEBUG [RS:0;51ca66f7ee3c:38547 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/oldWALs 2024-11-11T20:46:00,049 INFO [RS:0;51ca66f7ee3c:38547 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 51ca66f7ee3c%2C38547%2C1731357916578:(num 1731357954374) 2024-11-11T20:46:00,049 DEBUG [RS:0;51ca66f7ee3c:38547 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T20:46:00,050 INFO [RS:0;51ca66f7ee3c:38547 {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T20:46:00,050 INFO [RS:0;51ca66f7ee3c:38547 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T20:46:00,050 INFO [RS:0;51ca66f7ee3c:38547 {}] hbase.ChoreService(370): Chore service for: regionserver/51ca66f7ee3c:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-11T20:46:00,050 INFO [RS:0;51ca66f7ee3c:38547 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T20:46:00,050 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-11T20:46:00,050 INFO [RS:0;51ca66f7ee3c:38547 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38547 2024-11-11T20:46:00,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38547-0x100308899d10001, quorum=127.0.0.1:57310, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/51ca66f7ee3c,38547,1731357916578 2024-11-11T20:46:00,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39527-0x100308899d10000, quorum=127.0.0.1:57310, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T20:46:00,051 INFO [RS:0;51ca66f7ee3c:38547 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T20:46:00,052 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [51ca66f7ee3c,38547,1731357916578] 2024-11-11T20:46:00,053 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/51ca66f7ee3c,38547,1731357916578 already deleted, retry=false 2024-11-11T20:46:00,053 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 51ca66f7ee3c,38547,1731357916578 expired; onlineServers=0 2024-11-11T20:46:00,053 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '51ca66f7ee3c,39527,1731357916531' ***** 2024-11-11T20:46:00,053 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-11T20:46:00,053 INFO [M:0;51ca66f7ee3c:39527 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T20:46:00,053 INFO [M:0;51ca66f7ee3c:39527 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T20:46:00,053 DEBUG [M:0;51ca66f7ee3c:39527 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-11T20:46:00,053 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
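The ZKWatcher and RegionServerTracker lines above illustrate how liveness is tracked during shutdown: each region server owns an ephemeral znode under /hbase/rs, and the NodeDeleted/NodeChildrenChanged events fired when its ZooKeeper session ends are what trigger the master-side expiration. A minimal sketch of watching such a node with the plain ZooKeeper client is shown below; the connect string and znode path are copied from the log, and the reaction is illustrative only.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Sketch: arm a watch on a region server's ephemeral znode and react to
// NodeDeleted, the event the RegionServerTracker lines above respond to.
// Connect string and path come from the log; error handling is omitted.
public class EphemeralNodeWatchSketch {
    public static void main(String[] args) throws Exception {
        String znode = "/hbase/rs/51ca66f7ee3c,38547,1731357916578";
        ZooKeeper zk = new ZooKeeper("127.0.0.1:57310", 30_000, event -> { });
        Watcher watcher = new Watcher() {
            @Override
            public void process(WatchedEvent event) {
                if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
                    System.out.println("RegionServer ephemeral node deleted: " + event.getPath());
                    // a master-side tracker would begin server expiration here
                }
            }
        };
        if (zk.exists(znode, watcher) == null) {                    // exists() also arms the watch
            System.out.println("server already gone: " + znode);
        }
    }
}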
2024-11-11T20:46:00,053 DEBUG [M:0;51ca66f7ee3c:39527 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-11T20:46:00,054 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster-HFileCleaner.small.0-1731357916910 {}] cleaner.HFileCleaner(306): Exit Thread[master/51ca66f7ee3c:0:becomeActiveMaster-HFileCleaner.small.0-1731357916910,5,FailOnTimeoutGroup] 2024-11-11T20:46:00,054 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster-HFileCleaner.large.0-1731357916901 {}] cleaner.HFileCleaner(306): Exit Thread[master/51ca66f7ee3c:0:becomeActiveMaster-HFileCleaner.large.0-1731357916901,5,FailOnTimeoutGroup] 2024-11-11T20:46:00,054 INFO [M:0;51ca66f7ee3c:39527 {}] hbase.ChoreService(370): Chore service for: master/51ca66f7ee3c:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-11T20:46:00,054 INFO [M:0;51ca66f7ee3c:39527 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T20:46:00,054 DEBUG [M:0;51ca66f7ee3c:39527 {}] master.HMaster(1795): Stopping service threads 2024-11-11T20:46:00,054 INFO [M:0;51ca66f7ee3c:39527 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-11T20:46:00,054 INFO [M:0;51ca66f7ee3c:39527 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T20:46:00,054 INFO [M:0;51ca66f7ee3c:39527 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-11T20:46:00,054 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-11T20:46:00,054 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39527-0x100308899d10000, quorum=127.0.0.1:57310, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-11T20:46:00,054 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39527-0x100308899d10000, quorum=127.0.0.1:57310, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:46:00,055 DEBUG [M:0;51ca66f7ee3c:39527 {}] zookeeper.ZKUtil(347): master:39527-0x100308899d10000, quorum=127.0.0.1:57310, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-11T20:46:00,055 WARN [M:0;51ca66f7ee3c:39527 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-11T20:46:00,055 INFO [M:0;51ca66f7ee3c:39527 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/.lastflushedseqids 2024-11-11T20:46:00,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35329 is added to blk_1073741904_1090 (size=130) 2024-11-11T20:46:00,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42575 is added to blk_1073741904_1090 (size=130) 2024-11-11T20:46:00,061 INFO [M:0;51ca66f7ee3c:39527 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-11T20:46:00,061 INFO [M:0;51ca66f7ee3c:39527 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-11T20:46:00,062 DEBUG [M:0;51ca66f7ee3c:39527 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T20:46:00,062 INFO [M:0;51ca66f7ee3c:39527 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T20:46:00,062 DEBUG [M:0;51ca66f7ee3c:39527 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T20:46:00,062 DEBUG [M:0;51ca66f7ee3c:39527 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T20:46:00,062 DEBUG [M:0;51ca66f7ee3c:39527 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T20:46:00,062 INFO [M:0;51ca66f7ee3c:39527 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.24 KB heapSize=29.47 KB 2024-11-11T20:46:00,076 DEBUG [M:0;51ca66f7ee3c:39527 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cf1f4ea73ca9492ab61e0afdff870ecf is 82, key is hbase:meta,,1/info:regioninfo/1731357917715/Put/seqid=0 2024-11-11T20:46:00,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35329 is added to blk_1073741905_1091 (size=5672) 2024-11-11T20:46:00,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42575 is added to blk_1073741905_1091 (size=5672) 2024-11-11T20:46:00,082 INFO [M:0;51ca66f7ee3c:39527 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cf1f4ea73ca9492ab61e0afdff870ecf 2024-11-11T20:46:00,103 DEBUG [M:0;51ca66f7ee3c:39527 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a84c24231a71438b8f144609ebb20a93 is 773, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731357918342/Put/seqid=0 2024-11-11T20:46:00,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42575 is added to blk_1073741906_1092 (size=6254) 2024-11-11T20:46:00,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35329 is added to blk_1073741906_1092 (size=6254) 2024-11-11T20:46:00,109 INFO [M:0;51ca66f7ee3c:39527 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.57 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a84c24231a71438b8f144609ebb20a93 2024-11-11T20:46:00,114 INFO [M:0;51ca66f7ee3c:39527 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a84c24231a71438b8f144609ebb20a93 2024-11-11T20:46:00,128 DEBUG [M:0;51ca66f7ee3c:39527 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/435da5aaf51142dd896487b3ecc22e78 is 69, key is 51ca66f7ee3c,38547,1731357916578/rs:state/1731357916942/Put/seqid=0 2024-11-11T20:46:00,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42575 is added to blk_1073741907_1093 (size=5224) 2024-11-11T20:46:00,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35329 is added to blk_1073741907_1093 (size=5224) 2024-11-11T20:46:00,133 INFO [M:0;51ca66f7ee3c:39527 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/435da5aaf51142dd896487b3ecc22e78 2024-11-11T20:46:00,152 INFO [RS:0;51ca66f7ee3c:38547 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T20:46:00,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38547-0x100308899d10001, quorum=127.0.0.1:57310, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T20:46:00,152 INFO [RS:0;51ca66f7ee3c:38547 {}] regionserver.HRegionServer(1031): Exiting; stopping=51ca66f7ee3c,38547,1731357916578; zookeeper connection closed. 2024-11-11T20:46:00,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38547-0x100308899d10001, quorum=127.0.0.1:57310, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T20:46:00,152 DEBUG [M:0;51ca66f7ee3c:39527 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/02c18479c02844c9b802478e078a41ef is 52, key is load_balancer_on/state:d/1731357917832/Put/seqid=0 2024-11-11T20:46:00,153 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1c429ad6 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1c429ad6 2024-11-11T20:46:00,153 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-11T20:46:00,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35329 is added to blk_1073741908_1094 (size=5056) 2024-11-11T20:46:00,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42575 is added to blk_1073741908_1094 (size=5056) 2024-11-11T20:46:00,158 INFO [M:0;51ca66f7ee3c:39527 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/02c18479c02844c9b802478e078a41ef 2024-11-11T20:46:00,163 DEBUG [M:0;51ca66f7ee3c:39527 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cf1f4ea73ca9492ab61e0afdff870ecf as 
hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/cf1f4ea73ca9492ab61e0afdff870ecf 2024-11-11T20:46:00,168 INFO [M:0;51ca66f7ee3c:39527 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/cf1f4ea73ca9492ab61e0afdff870ecf, entries=8, sequenceid=60, filesize=5.5 K 2024-11-11T20:46:00,170 DEBUG [M:0;51ca66f7ee3c:39527 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a84c24231a71438b8f144609ebb20a93 as hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a84c24231a71438b8f144609ebb20a93 2024-11-11T20:46:00,175 INFO [M:0;51ca66f7ee3c:39527 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a84c24231a71438b8f144609ebb20a93 2024-11-11T20:46:00,175 INFO [M:0;51ca66f7ee3c:39527 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a84c24231a71438b8f144609ebb20a93, entries=6, sequenceid=60, filesize=6.1 K 2024-11-11T20:46:00,176 DEBUG [M:0;51ca66f7ee3c:39527 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/435da5aaf51142dd896487b3ecc22e78 as hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/435da5aaf51142dd896487b3ecc22e78 2024-11-11T20:46:00,181 INFO [M:0;51ca66f7ee3c:39527 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/435da5aaf51142dd896487b3ecc22e78, entries=2, sequenceid=60, filesize=5.1 K 2024-11-11T20:46:00,182 DEBUG [M:0;51ca66f7ee3c:39527 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/02c18479c02844c9b802478e078a41ef as hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/02c18479c02844c9b802478e078a41ef 2024-11-11T20:46:00,187 INFO [M:0;51ca66f7ee3c:39527 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/02c18479c02844c9b802478e078a41ef, entries=1, sequenceid=60, filesize=4.9 K 2024-11-11T20:46:00,188 INFO [M:0;51ca66f7ee3c:39527 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.24 KB/23793, heapSize ~29.41 KB/30112, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 126ms, sequenceid=60, compaction requested=false 2024-11-11T20:46:00,190 INFO [M:0;51ca66f7ee3c:39527 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
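The flush above commits one HFile per column family and logs each commit as "Added <path>, entries=N, sequenceid=S, filesize=X K". A minimal Python sketch for summarizing those commit lines from a capture like this one; the regex, the helper name summarize_flush, and the file name test-output.log are assumptions for illustration, not part of the test harness:

import re

# Matches the HStore$StoreFlusherImpl "Added ..." lines in the format shown above.
ADDED_RE = re.compile(
    r"HStore\$StoreFlusherImpl\(\d+\): Added (?P<path>\S+), "
    r"entries=(?P<entries>\d+), sequenceid=(?P<seqid>\d+), filesize=(?P<size>\S+ K)"
)

def summarize_flush(log_text):
    """Return (family, entries, filesize) for each committed flush file found."""
    rows = []
    for m in ADDED_RE.finditer(log_text):
        # Store file path is .../<region>/<family>/<hfile>, so [-2] is the family.
        family = m.group("path").rstrip("/").split("/")[-2]
        rows.append((family, int(m.group("entries")), m.group("size")))
    return rows

if __name__ == "__main__":
    with open("test-output.log") as f:      # hypothetical saved copy of this log
        for family, entries, size in summarize_flush(f.read()):
            print(f"{family:6s} entries={entries:3d} filesize={size}")

Run against the entries above, this would report the info, proc, rs and state families with 8, 6, 2 and 1 entries respectively.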
2024-11-11T20:46:00,190 DEBUG [M:0;51ca66f7ee3c:39527 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731357960062Disabling compacts and flushes for region at 1731357960062Disabling writes for close at 1731357960062Obtaining lock to block concurrent updates at 1731357960062Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731357960062Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23793, getHeapSize=30112, getOffHeapSize=0, getCellsCount=71 at 1731357960062Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731357960063 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731357960063Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731357960076 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731357960076Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731357960086 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731357960103 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731357960103Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731357960114 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731357960127 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731357960127Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731357960138 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731357960152 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731357960152Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1b4977b6: reopening flushed file at 1731357960162 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@af33cee: reopening flushed file at 1731357960169 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@f92b3c9: reopening flushed file at 1731357960175 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@462e607c: reopening flushed file at 1731357960181 (+6 ms)Finished flush of dataSize ~23.24 KB/23793, heapSize ~29.41 KB/30112, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 126ms, sequenceid=60, compaction requested=false at 1731357960188 (+7 ms)Writing region close event to WAL at 1731357960190 (+2 ms)Closed at 1731357960190 2024-11-11T20:46:00,190 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:00,190 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:00,191 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:00,191 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:00,191 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:00,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42575 is added to blk_1073741880_1063 (size=1045) 2024-11-11T20:46:00,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35329 is added to blk_1073741880_1063 (size=1045) 2024-11-11T20:46:00,193 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
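The "Region close journal" entry above records each close/flush phase as "<phase> at <epoch-ms>", with "(+N ms)" deltas between consecutive phases. A minimal sketch for pulling per-phase timings out of such an entry, assuming that exact concatenated format; the regexes, the function name close_journal_phases, and test-output.log are illustrative assumptions:

import re

# Grab the journal text that follows "Region close journal for <region>:",
# stopping at the next timestamped log entry (or end of input).
JOURNAL_RE = re.compile(
    r"Region close journal for \w+: (?P<journal>.*?)(?=\s2024-|\Z)", re.S
)
# Each phase ends with " at <13-digit epoch ms>" and an optional "(+N ms)" delta.
PHASE_RE = re.compile(r"(?P<phase>.+?) at (?P<ts>\d{13})(?: \(\+(?P<delta>\d+) ms\))?")

def close_journal_phases(log_text):
    """Yield (phase, epoch_ms, delta_ms) for every journal phase found."""
    for jm in JOURNAL_RE.finditer(log_text):
        for pm in PHASE_RE.finditer(jm.group("journal")):
            delta = int(pm.group("delta") or 0)
            yield pm.group("phase").strip(), int(pm.group("ts")), delta

if __name__ == "__main__":
    with open("test-output.log") as f:      # hypothetical saved copy of this log
        for phase, ts, delta in close_journal_phases(f.read()):
            print(f"+{delta:4d} ms  {phase}")

For the journal above this would surface the slower steps, e.g. the +17 ms spent appending metadata for the proc family, which can help when comparing flaky runs of the same test.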
2024-11-11T20:46:00,193 INFO [M:0;51ca66f7ee3c:39527 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-11T20:46:00,193 INFO [M:0;51ca66f7ee3c:39527 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39527 2024-11-11T20:46:00,194 INFO [M:0;51ca66f7ee3c:39527 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T20:46:00,295 INFO [M:0;51ca66f7ee3c:39527 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T20:46:00,295 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39527-0x100308899d10000, quorum=127.0.0.1:57310, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T20:46:00,295 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39527-0x100308899d10000, quorum=127.0.0.1:57310, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T20:46:00,298 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@70a14b1c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T20:46:00,299 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1929003e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T20:46:00,299 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T20:46:00,299 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6e87f24b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T20:46:00,300 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@17a19f67{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/hadoop.log.dir/,STOPPED} 2024-11-11T20:46:00,302 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-11T20:46:00,302 WARN [BP-797088966-172.17.0.2-1731357915882 heartbeating to localhost/127.0.0.1:46503 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T20:46:00,302 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T20:46:00,302 WARN [BP-797088966-172.17.0.2-1731357915882 heartbeating to localhost/127.0.0.1:46503 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-797088966-172.17.0.2-1731357915882 (Datanode Uuid 7e594afa-bcac-4748-944c-21b12f1087c4) service to localhost/127.0.0.1:46503 2024-11-11T20:46:00,301 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@421fdd25 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-797088966-172.17.0.2-1731357915882:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:33779,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:37701 , LocalHost:localPort 51ca66f7ee3c/172.17.0.2:0. 
Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-11T20:46:00,302 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@421fdd25 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-797088966-172.17.0.2-1731357915882:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:42575,null,null]) java.io.IOException: No block pool offer service for bpid=BP-797088966-172.17.0.2-1731357915882 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T20:46:00,303 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@421fdd25 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-797088966-172.17.0.2-1731357915882:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:33779,null,null], DatanodeInfoWithStorage[127.0.0.1:42575,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-797088966-172.17.0.2-1731357915882:blk_1073741837_1013, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:33779,null,null], DatanodeInfoWithStorage[127.0.0.1:42575,null,null]] 2024-11-11T20:46:00,303 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data3/current/BP-797088966-172.17.0.2-1731357915882 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T20:46:00,303 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@421fdd25 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-797088966-172.17.0.2-1731357915882:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:33779,null,null]) java.io.IOException: No block pool offer service for bpid=BP-797088966-172.17.0.2-1731357915882 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T20:46:00,303 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@421fdd25 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-797088966-172.17.0.2-1731357915882:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:42575,null,null]) java.io.IOException: No block pool offer service for bpid=BP-797088966-172.17.0.2-1731357915882 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T20:46:00,303 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data4/current/BP-797088966-172.17.0.2-1731357915882 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T20:46:00,303 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@421fdd25 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-797088966-172.17.0.2-1731357915882:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:33779,null,null], DatanodeInfoWithStorage[127.0.0.1:42575,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-797088966-172.17.0.2-1731357915882:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:33779,null,null], DatanodeInfoWithStorage[127.0.0.1:42575,null,null]] 2024-11-11T20:46:00,304 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T20:46:00,305 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@542c34dc{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T20:46:00,306 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2f0a2519{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T20:46:00,306 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T20:46:00,306 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21f536ea{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T20:46:00,306 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@2801262{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/hadoop.log.dir/,STOPPED} 2024-11-11T20:46:00,307 WARN [BP-797088966-172.17.0.2-1731357915882 heartbeating to localhost/127.0.0.1:46503 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T20:46:00,307 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-11T20:46:00,307 WARN [BP-797088966-172.17.0.2-1731357915882 heartbeating to localhost/127.0.0.1:46503 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-797088966-172.17.0.2-1731357915882 (Datanode Uuid c426f781-4da7-4c04-9fda-4773aa7ad7fa) service to localhost/127.0.0.1:46503 2024-11-11T20:46:00,307 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T20:46:00,308 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data9/current/BP-797088966-172.17.0.2-1731357915882 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T20:46:00,308 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/cluster_d41c68ba-b5af-f774-fcab-25da8055c186/data/data10/current/BP-797088966-172.17.0.2-1731357915882 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T20:46:00,308 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T20:46:00,313 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4705e615{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T20:46:00,313 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5d8a9c69{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T20:46:00,313 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T20:46:00,313 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@16369da1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T20:46:00,314 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@15d1211f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/hadoop.log.dir/,STOPPED} 2024-11-11T20:46:00,323 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-11T20:46:00,359 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-11T20:46:00,366 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath 
Thread=153 (was 78) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:39927 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46503 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:46503 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:46503 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$894/0x00007f0f80bf46e8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$894/0x00007f0f80bf46e8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46503 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:39927 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46503 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) 
java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-19-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:46503 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:46503 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46503 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:46503 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-18-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:46503 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:46503 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=450 (was 404) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=115 (was 154), ProcessCount=11 (was 11), AvailableMemoryMB=4203 (was 5062) 2024-11-11T20:46:00,372 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=153, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=115, ProcessCount=11, AvailableMemoryMB=4202 2024-11-11T20:46:00,372 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-11T20:46:00,373 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/hadoop.log.dir so I do NOT create it in target/test-data/cf12611e-2316-150e-8454-e237ac8003dc 2024-11-11T20:46:00,373 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/086e6c11-646b-bf2a-d1e2-4f7a8cec6e75/hadoop.tmp.dir so I do NOT create it in target/test-data/cf12611e-2316-150e-8454-e237ac8003dc 2024-11-11T20:46:00,373 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/cluster_adf983fe-fa26-345e-6630-2ba2317a1d11, deleteOnExit=true 2024-11-11T20:46:00,373 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-11T20:46:00,373 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/test.cache.data in system properties and HBase conf 2024-11-11T20:46:00,373 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/hadoop.tmp.dir in system properties and HBase conf 2024-11-11T20:46:00,373 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/hadoop.log.dir in system properties and HBase conf 2024-11-11T20:46:00,373 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-11T20:46:00,373 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-11T20:46:00,373 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-11T20:46:00,374 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-11T20:46:00,374 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-11T20:46:00,374 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-11T20:46:00,374 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-11T20:46:00,374 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T20:46:00,374 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-11T20:46:00,374 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-11T20:46:00,374 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T20:46:00,374 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T20:46:00,374 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-11T20:46:00,374 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/nfs.dump.dir in system properties and HBase conf 2024-11-11T20:46:00,374 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/java.io.tmpdir in system properties and HBase conf 2024-11-11T20:46:00,374 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T20:46:00,375 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-11T20:46:00,375 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-11T20:46:00,386 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-11T20:46:00,393 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-11T20:46:00,410 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:00,411 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:00,411 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:00,412 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:00,412 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:00,412 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:00,416 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:00,416 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:00,416 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:00,419 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:00,471 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T20:46:00,475 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T20:46:00,476 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T20:46:00,476 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T20:46:00,476 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T20:46:00,477 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T20:46:00,477 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@464ea64c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/hadoop.log.dir/,AVAILABLE} 2024-11-11T20:46:00,477 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4c5497db{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T20:46:00,571 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1a59d25d{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/java.io.tmpdir/jetty-localhost-40175-hadoop-hdfs-3_4_1-tests_jar-_-any-8391559195156638814/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T20:46:00,571 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5e526681{HTTP/1.1, (http/1.1)}{localhost:40175} 2024-11-11T20:46:00,571 INFO [Time-limited test {}] server.Server(415): Started @149796ms 2024-11-11T20:46:00,583 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-11T20:46:00,633 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T20:46:00,636 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T20:46:00,636 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T20:46:00,636 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T20:46:00,636 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T20:46:00,637 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5fec76d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/hadoop.log.dir/,AVAILABLE} 2024-11-11T20:46:00,637 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2b810f17{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T20:46:00,731 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@168478e8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/java.io.tmpdir/jetty-localhost-42029-hadoop-hdfs-3_4_1-tests_jar-_-any-13922161353539514704/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T20:46:00,732 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@61549df4{HTTP/1.1, (http/1.1)}{localhost:42029} 2024-11-11T20:46:00,732 INFO [Time-limited test {}] server.Server(415): Started @149957ms 2024-11-11T20:46:00,733 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T20:46:00,764 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T20:46:00,768 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T20:46:00,771 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T20:46:00,771 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T20:46:00,771 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-11T20:46:00,772 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1da660ce{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/hadoop.log.dir/,AVAILABLE} 2024-11-11T20:46:00,772 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@14a79ae9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T20:46:00,799 WARN [Thread-1188 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/cluster_adf983fe-fa26-345e-6630-2ba2317a1d11/data/data1/current/BP-391654463-172.17.0.2-1731357960435/current, will proceed with Du for space computation calculation, 2024-11-11T20:46:00,799 WARN [Thread-1189 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/cluster_adf983fe-fa26-345e-6630-2ba2317a1d11/data/data2/current/BP-391654463-172.17.0.2-1731357960435/current, will proceed with Du for space computation calculation, 2024-11-11T20:46:00,818 WARN [Thread-1167 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T20:46:00,820 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb2a4c64f89b830a2 with lease ID 0x5dc5c1a0148ddbde: Processing first storage report for DS-d3abf4b2-410d-491f-b3e6-c3a5acc269cb from datanode DatanodeRegistration(127.0.0.1:36619, datanodeUuid=253fda30-9b38-4c72-8812-1987252b2841, infoPort=39545, infoSecurePort=0, ipcPort=46693, storageInfo=lv=-57;cid=testClusterID;nsid=1105776182;c=1731357960435) 2024-11-11T20:46:00,820 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb2a4c64f89b830a2 with lease ID 0x5dc5c1a0148ddbde: from storage DS-d3abf4b2-410d-491f-b3e6-c3a5acc269cb node DatanodeRegistration(127.0.0.1:36619, datanodeUuid=253fda30-9b38-4c72-8812-1987252b2841, infoPort=39545, infoSecurePort=0, ipcPort=46693, storageInfo=lv=-57;cid=testClusterID;nsid=1105776182;c=1731357960435), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T20:46:00,820 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb2a4c64f89b830a2 with lease ID 0x5dc5c1a0148ddbde: Processing first storage report for DS-b069680b-e99f-4791-9618-c00cf1a30fc7 from datanode DatanodeRegistration(127.0.0.1:36619, datanodeUuid=253fda30-9b38-4c72-8812-1987252b2841, infoPort=39545, infoSecurePort=0, ipcPort=46693, storageInfo=lv=-57;cid=testClusterID;nsid=1105776182;c=1731357960435) 2024-11-11T20:46:00,821 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb2a4c64f89b830a2 with lease ID 0x5dc5c1a0148ddbde: from storage DS-b069680b-e99f-4791-9618-c00cf1a30fc7 node DatanodeRegistration(127.0.0.1:36619, datanodeUuid=253fda30-9b38-4c72-8812-1987252b2841, infoPort=39545, infoSecurePort=0, ipcPort=46693, storageInfo=lv=-57;cid=testClusterID;nsid=1105776182;c=1731357960435), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T20:46:00,837 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:00,850 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T20:46:00,875 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@151f97c5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/java.io.tmpdir/jetty-localhost-39473-hadoop-hdfs-3_4_1-tests_jar-_-any-11092905776408452759/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T20:46:00,875 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@232fa1ae{HTTP/1.1, (http/1.1)}{localhost:39473} 2024-11-11T20:46:00,876 INFO [Time-limited test {}] server.Server(415): Started @150100ms 2024-11-11T20:46:00,877 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T20:46:00,932 WARN [Thread-1215 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/cluster_adf983fe-fa26-345e-6630-2ba2317a1d11/data/data4/current/BP-391654463-172.17.0.2-1731357960435/current, will proceed with Du for space computation calculation, 2024-11-11T20:46:00,932 WARN [Thread-1214 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/cluster_adf983fe-fa26-345e-6630-2ba2317a1d11/data/data3/current/BP-391654463-172.17.0.2-1731357960435/current, will proceed with Du for space computation calculation, 2024-11-11T20:46:00,950 WARN [Thread-1203 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T20:46:00,952 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa2078713542514eb with lease ID 0x5dc5c1a0148ddbdf: Processing first storage report for DS-a1a81eb6-4e25-4e82-b894-7c166deab86b from datanode DatanodeRegistration(127.0.0.1:44023, datanodeUuid=09dbf34a-6fb5-44ad-8b0f-667c3c8b31ab, infoPort=44403, infoSecurePort=0, ipcPort=38009, storageInfo=lv=-57;cid=testClusterID;nsid=1105776182;c=1731357960435) 2024-11-11T20:46:00,952 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa2078713542514eb with lease ID 0x5dc5c1a0148ddbdf: from storage DS-a1a81eb6-4e25-4e82-b894-7c166deab86b node DatanodeRegistration(127.0.0.1:44023, datanodeUuid=09dbf34a-6fb5-44ad-8b0f-667c3c8b31ab, infoPort=44403, infoSecurePort=0, ipcPort=38009, storageInfo=lv=-57;cid=testClusterID;nsid=1105776182;c=1731357960435), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T20:46:00,952 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa2078713542514eb with lease ID 0x5dc5c1a0148ddbdf: Processing first storage report for DS-eb59f929-d355-4a5a-88ce-889b099919e3 from datanode DatanodeRegistration(127.0.0.1:44023, datanodeUuid=09dbf34a-6fb5-44ad-8b0f-667c3c8b31ab, infoPort=44403, infoSecurePort=0, ipcPort=38009, storageInfo=lv=-57;cid=testClusterID;nsid=1105776182;c=1731357960435) 2024-11-11T20:46:00,952 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa2078713542514eb with lease ID 0x5dc5c1a0148ddbdf: from storage DS-eb59f929-d355-4a5a-88ce-889b099919e3 node DatanodeRegistration(127.0.0.1:44023, datanodeUuid=09dbf34a-6fb5-44ad-8b0f-667c3c8b31ab, infoPort=44403, infoSecurePort=0, ipcPort=38009, storageInfo=lv=-57;cid=testClusterID;nsid=1105776182;c=1731357960435), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T20:46:01,001 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc 2024-11-11T20:46:01,006 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/cluster_adf983fe-fa26-345e-6630-2ba2317a1d11/zookeeper_0, clientPort=56616, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/cluster_adf983fe-fa26-345e-6630-2ba2317a1d11/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/cluster_adf983fe-fa26-345e-6630-2ba2317a1d11/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-11T20:46:01,007 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56616 2024-11-11T20:46:01,007 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:46:01,009 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:46:01,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36619 is added to blk_1073741825_1001 (size=7) 2024-11-11T20:46:01,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44023 is added to blk_1073741825_1001 (size=7) 2024-11-11T20:46:01,022 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5 with version=8 2024-11-11T20:46:01,022 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/hbase-staging 2024-11-11T20:46:01,024 INFO [Time-limited test {}] client.ConnectionUtils(128): master/51ca66f7ee3c:0 server-side Connection retries=45 2024-11-11T20:46:01,024 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T20:46:01,024 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T20:46:01,025 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T20:46:01,025 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T20:46:01,025 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T20:46:01,025 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-11T20:46:01,025 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T20:46:01,026 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40891 2024-11-11T20:46:01,027 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40891 connecting to ZooKeeper ensemble=127.0.0.1:56616 2024-11-11T20:46:01,030 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:408910x0, quorum=127.0.0.1:56616, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T20:46:01,031 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40891-0x1003089479b0000 connected 2024-11-11T20:46:01,040 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:46:01,042 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:46:01,043 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40891-0x1003089479b0000, quorum=127.0.0.1:56616, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T20:46:01,043 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5, hbase.cluster.distributed=false 2024-11-11T20:46:01,045 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40891-0x1003089479b0000, quorum=127.0.0.1:56616, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T20:46:01,045 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40891 2024-11-11T20:46:01,046 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40891 2024-11-11T20:46:01,046 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40891 2024-11-11T20:46:01,047 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40891 2024-11-11T20:46:01,049 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40891 2024-11-11T20:46:01,062 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/51ca66f7ee3c:0 server-side Connection retries=45 2024-11-11T20:46:01,062 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T20:46:01,062 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T20:46:01,062 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T20:46:01,063 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T20:46:01,063 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T20:46:01,063 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-11T20:46:01,063 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T20:46:01,063 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40597 2024-11-11T20:46:01,065 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40597 connecting to ZooKeeper ensemble=127.0.0.1:56616 2024-11-11T20:46:01,065 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:46:01,066 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:46:01,069 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:405970x0, quorum=127.0.0.1:56616, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T20:46:01,070 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:405970x0, quorum=127.0.0.1:56616, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T20:46:01,070 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40597-0x1003089479b0001 connected 2024-11-11T20:46:01,070 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-11T20:46:01,070 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-11T20:46:01,071 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40597-0x1003089479b0001, quorum=127.0.0.1:56616, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-11T20:46:01,072 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40597-0x1003089479b0001, quorum=127.0.0.1:56616, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T20:46:01,072 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40597 2024-11-11T20:46:01,072 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40597 2024-11-11T20:46:01,073 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40597 2024-11-11T20:46:01,073 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40597 2024-11-11T20:46:01,073 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40597 2024-11-11T20:46:01,084 DEBUG [M:0;51ca66f7ee3c:40891 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;51ca66f7ee3c:40891 2024-11-11T20:46:01,084 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/51ca66f7ee3c,40891,1731357961024 2024-11-11T20:46:01,086 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40891-0x1003089479b0000, quorum=127.0.0.1:56616, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T20:46:01,086 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40597-0x1003089479b0001, quorum=127.0.0.1:56616, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T20:46:01,086 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40891-0x1003089479b0000, quorum=127.0.0.1:56616, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/51ca66f7ee3c,40891,1731357961024 2024-11-11T20:46:01,087 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40597-0x1003089479b0001, quorum=127.0.0.1:56616, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-11T20:46:01,087 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40891-0x1003089479b0000, quorum=127.0.0.1:56616, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:46:01,087 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40597-0x1003089479b0001, quorum=127.0.0.1:56616, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:46:01,088 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40891-0x1003089479b0000, quorum=127.0.0.1:56616, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-11T20:46:01,088 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/51ca66f7ee3c,40891,1731357961024 from backup master directory 2024-11-11T20:46:01,089 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40891-0x1003089479b0000, quorum=127.0.0.1:56616, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/51ca66f7ee3c,40891,1731357961024 2024-11-11T20:46:01,089 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40597-0x1003089479b0001, quorum=127.0.0.1:56616, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T20:46:01,089 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40891-0x1003089479b0000, quorum=127.0.0.1:56616, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T20:46:01,089 WARN [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
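
For orientation while reading this startup sequence (mini DFS, mini ZooKeeper, one HMaster and one region server): a minimal sketch of the kind of test code that drives it. HBaseTestingUtil and StartMiniClusterOption appear by name in the log above; the builder method names below are assumed to mirror the option fields printed there (numMasters, numRegionServers, numDataNodes, numZkServers) and are not verified against this exact 4.0.0-alpha-1-SNAPSHOT build.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    // Option set matching the one printed earlier in this log.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)          // one HMaster
        .numRegionServers(1)    // one MiniHBaseClusterRegionServer
        .numDataNodes(2)        // two HDFS datanodes (hence two block reports)
        .numZkServers(1)        // one MiniZooKeeperCluster member
        .build();
    util.startMiniCluster(option);   // brings up DFS, ZooKeeper, master and region server
    try {
      // ... exercise the cluster (e.g. WAL rolling) here ...
    } finally {
      util.shutdownMiniCluster();    // tear everything down at the end of the test
    }
  }
}
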
2024-11-11T20:46:01,089 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=51ca66f7ee3c,40891,1731357961024 2024-11-11T20:46:01,092 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/hbase.id] with ID: c763c986-7925-4f4b-829e-dc08878bfb77 2024-11-11T20:46:01,092 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/.tmp/hbase.id 2024-11-11T20:46:01,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44023 is added to blk_1073741826_1002 (size=42) 2024-11-11T20:46:01,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36619 is added to blk_1073741826_1002 (size=42) 2024-11-11T20:46:01,098 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/.tmp/hbase.id]:[hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/hbase.id] 2024-11-11T20:46:01,110 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:46:01,110 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-11T20:46:01,111 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
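
The two util.FSUtils entries just above show the cluster ID file being written to a temporary path and then moved to its final location under hbase.id. A hedged sketch of that write-then-rename idiom with the stock Hadoop FileSystem API follows; the paths, the plain UUID payload, and the class name are illustrative only, not the values or the internal code from this run.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.UUID;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);                         // e.g. an hdfs:// URI
    Path rootDir = new Path("/user/jenkins/test-data/example");   // illustrative root dir
    Path tmp = new Path(rootDir, ".tmp/hbase.id");
    Path id = new Path(rootDir, "hbase.id");

    // Write the ID to a temporary file first (a plain UUID string here, purely for illustration) ...
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write(UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8));
    }
    // ... then publish it by renaming the temporary file into its target location.
    if (!fs.rename(tmp, id)) {
      throw new IOException("could not move " + tmp + " to " + id);
    }
  }
}
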
2024-11-11T20:46:01,113 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40891-0x1003089479b0000, quorum=127.0.0.1:56616, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:46:01,113 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40597-0x1003089479b0001, quorum=127.0.0.1:56616, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:46:01,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44023 is added to blk_1073741827_1003 (size=196) 2024-11-11T20:46:01,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36619 is added to blk_1073741827_1003 (size=196) 2024-11-11T20:46:01,119 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T20:46:01,120 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-11T20:46:01,120 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T20:46:01,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44023 is added to blk_1073741828_1004 (size=1189) 2024-11-11T20:46:01,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36619 is added to blk_1073741828_1004 (size=1189) 2024-11-11T20:46:01,130 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/MasterData/data/master/store 2024-11-11T20:46:01,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44023 is added to blk_1073741829_1005 (size=34) 2024-11-11T20:46:01,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36619 is added to blk_1073741829_1005 (size=34) 2024-11-11T20:46:01,136 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T20:46:01,136 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T20:46:01,137 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T20:46:01,137 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T20:46:01,137 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T20:46:01,137 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T20:46:01,137 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
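
The preceding entries show the FSHLog-based WAL being created for the master local region (WALFactory selects FSHLogProvider, a new writer is opened against the two-datanode pipeline). The test in progress, TestLogRolling#testLogRollOnPipelineRestart, exercises rolls of such WALs; purely for orientation, one way to force a roll from client code is the Admin API sketched below. This is not necessarily how the test triggers its rolls internally, and the connection and server-name plumbing are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RollWalSketch {
  // Ask the region server identified by serverName to close its current WAL file and
  // start a new one; the result is the same kind of "New WAL ..." entry seen above.
  static void rollWal(Configuration conf, ServerName serverName) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.rollWALWriter(serverName);
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // In a real test the ServerName comes from the running cluster; the literal below
    // is only a placeholder to keep the sketch self-contained.
    ServerName rs = ServerName.valueOf("localhost", 16020, System.currentTimeMillis());
    rollWal(conf, rs);
  }
}
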
2024-11-11T20:46:01,137 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731357961136Disabling compacts and flushes for region at 1731357961136Disabling writes for close at 1731357961137 (+1 ms)Writing region close event to WAL at 1731357961137Closed at 1731357961137 2024-11-11T20:46:01,138 WARN [master/51ca66f7ee3c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/MasterData/data/master/store/.initializing 2024-11-11T20:46:01,138 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/MasterData/WALs/51ca66f7ee3c,40891,1731357961024 2024-11-11T20:46:01,140 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=51ca66f7ee3c%2C40891%2C1731357961024, suffix=, logDir=hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/MasterData/WALs/51ca66f7ee3c,40891,1731357961024, archiveDir=hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/MasterData/oldWALs, maxLogs=10 2024-11-11T20:46:01,141 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 51ca66f7ee3c%2C40891%2C1731357961024.1731357961140 2024-11-11T20:46:01,145 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/MasterData/WALs/51ca66f7ee3c,40891,1731357961024/51ca66f7ee3c%2C40891%2C1731357961024.1731357961140 2024-11-11T20:46:01,146 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44403:44403),(127.0.0.1/127.0.0.1:39545:39545)] 2024-11-11T20:46:01,146 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-11T20:46:01,146 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T20:46:01,147 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:46:01,147 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:46:01,148 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:46:01,149 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-11T20:46:01,149 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:46:01,149 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:46:01,150 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:46:01,151 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-11T20:46:01,151 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:46:01,151 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T20:46:01,151 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:46:01,152 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-11T20:46:01,152 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:46:01,153 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T20:46:01,153 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:46:01,154 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-11T20:46:01,154 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:46:01,154 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T20:46:01,154 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:46:01,155 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:46:01,155 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:46:01,157 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:46:01,157 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:46:01,157 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-11T20:46:01,158 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:46:01,160 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T20:46:01,161 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=824206, jitterRate=0.048032164573669434}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-11T20:46:01,161 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731357961147Initializing all the Stores at 1731357961147Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731357961147Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731357961148 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731357961148Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731357961148Cleaning up temporary data from old regions at 1731357961157 (+9 ms)Region opened successfully at 1731357961161 (+4 ms) 2024-11-11T20:46:01,162 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-11T20:46:01,165 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@287bf36e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=51ca66f7ee3c/172.17.0.2:0 2024-11-11T20:46:01,166 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-11T20:46:01,166 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-11T20:46:01,166 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-11T20:46:01,166 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-11T20:46:01,167 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-11T20:46:01,167 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-11T20:46:01,167 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-11T20:46:01,169 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-11T20:46:01,169 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40891-0x1003089479b0000, quorum=127.0.0.1:56616, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-11T20:46:01,170 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-11T20:46:01,171 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-11T20:46:01,171 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40891-0x1003089479b0000, quorum=127.0.0.1:56616, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-11T20:46:01,172 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-11T20:46:01,172 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-11T20:46:01,173 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40891-0x1003089479b0000, quorum=127.0.0.1:56616, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-11T20:46:01,174 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-11T20:46:01,175 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40891-0x1003089479b0000, quorum=127.0.0.1:56616, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-11T20:46:01,175 DEBUG 
[master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-11T20:46:01,177 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40891-0x1003089479b0000, quorum=127.0.0.1:56616, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-11T20:46:01,178 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-11T20:46:01,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40597-0x1003089479b0001, quorum=127.0.0.1:56616, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T20:46:01,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40891-0x1003089479b0000, quorum=127.0.0.1:56616, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T20:46:01,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40891-0x1003089479b0000, quorum=127.0.0.1:56616, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:46:01,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40597-0x1003089479b0001, quorum=127.0.0.1:56616, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:46:01,179 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=51ca66f7ee3c,40891,1731357961024, sessionid=0x1003089479b0000, setting cluster-up flag (Was=false) 2024-11-11T20:46:01,181 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40597-0x1003089479b0001, quorum=127.0.0.1:56616, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:46:01,181 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40891-0x1003089479b0000, quorum=127.0.0.1:56616, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:46:01,183 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-11T20:46:01,184 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=51ca66f7ee3c,40891,1731357961024 2024-11-11T20:46:01,186 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40597-0x1003089479b0001, quorum=127.0.0.1:56616, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:46:01,186 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40891-0x1003089479b0000, quorum=127.0.0.1:56616, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:46:01,189 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-11T20:46:01,189 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=51ca66f7ee3c,40891,1731357961024 2024-11-11T20:46:01,191 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-11T20:46:01,192 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-11T20:46:01,192 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-11T20:46:01,193 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-11T20:46:01,193 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 51ca66f7ee3c,40891,1731357961024 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-11T20:46:01,194 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/51ca66f7ee3c:0, corePoolSize=5, maxPoolSize=5 2024-11-11T20:46:01,194 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/51ca66f7ee3c:0, corePoolSize=5, maxPoolSize=5 2024-11-11T20:46:01,194 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/51ca66f7ee3c:0, corePoolSize=5, maxPoolSize=5 2024-11-11T20:46:01,194 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/51ca66f7ee3c:0, corePoolSize=5, maxPoolSize=5 2024-11-11T20:46:01,194 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/51ca66f7ee3c:0, corePoolSize=10, maxPoolSize=10 2024-11-11T20:46:01,194 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:46:01,194 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/51ca66f7ee3c:0, corePoolSize=2, maxPoolSize=2 2024-11-11T20:46:01,195 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/51ca66f7ee3c:0, corePoolSize=1, 
maxPoolSize=1 2024-11-11T20:46:01,199 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731357991199 2024-11-11T20:46:01,199 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-11T20:46:01,199 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-11T20:46:01,199 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-11T20:46:01,199 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-11T20:46:01,199 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-11T20:46:01,199 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-11T20:46:01,199 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T20:46:01,199 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-11T20:46:01,201 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:46:01,201 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
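Every store opened in this run logs "instantiating StoreFileTracker impl ... DefaultStoreFileTracker", and the hbase:meta descriptor written just below records the same choice as table metadata ('hbase.store.file-tracker.impl' => 'DEFAULT'). The following is a minimal sketch of how that selection is normally expressed through configuration; the key is taken verbatim from this log, while the possibility of other tracker values or per-table overrides is an assumption to verify against the HBase release in use.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Sketch: the store-file-tracker implementation resolved by
    // StoreFileTrackerFactory for each store above. 'DEFAULT' is the value
    // recorded in this run; alternative trackers are version-dependent and
    // not shown in this log.
    public class StoreFileTrackerSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.store.file-tracker.impl", "DEFAULT");
        System.out.println(conf.get("hbase.store.file-tracker.impl"));
      }
    }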
2024-11-11T20:46:01,201 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-11T20:46:01,204 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-11T20:46:01,204 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-11T20:46:01,204 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-11T20:46:01,205 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-11T20:46:01,205 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-11T20:46:01,205 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/51ca66f7ee3c:0:becomeActiveMaster-HFileCleaner.large.0-1731357961205,5,FailOnTimeoutGroup] 2024-11-11T20:46:01,205 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/51ca66f7ee3c:0:becomeActiveMaster-HFileCleaner.small.0-1731357961205,5,FailOnTimeoutGroup] 2024-11-11T20:46:01,205 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T20:46:01,205 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 
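The descriptor above lists each hbase:meta column family with its attributes (VERSIONS, BLOOMFILTER, IN_MEMORY, DATA_BLOCK_ENCODING, BLOCKSIZE). For comparison, here is a hedged sketch of how the 'info' family with those same attribute values would be expressed through the public client builder API; the master builds the meta descriptor internally, so this is purely illustrative.

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    // Sketch: the 'info' family attributes recorded in the hbase:meta
    // descriptor above, restated with the public client builder.
    public class MetaInfoFamilySketch {
      public static void main(String[] args) {
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                     // VERSIONS => '3'
            .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
            .setInMemory(true)                                     // IN_MEMORY => 'true'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING
            .setBlocksize(8192)                                    // BLOCKSIZE => '8192 B (8KB)'
            .build();
        System.out.println(info);
      }
    }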
2024-11-11T20:46:01,205 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-11T20:46:01,205 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-11T20:46:01,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36619 is added to blk_1073741831_1007 (size=1321) 2024-11-11T20:46:01,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44023 is added to blk_1073741831_1007 (size=1321) 2024-11-11T20:46:01,209 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-11T20:46:01,210 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5 2024-11-11T20:46:01,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44023 is added to blk_1073741832_1008 (size=32) 2024-11-11T20:46:01,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36619 is added to blk_1073741832_1008 (size=32) 2024-11-11T20:46:01,217 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T20:46:01,219 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, 
cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T20:46:01,220 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T20:46:01,220 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:46:01,220 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:46:01,221 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T20:46:01,222 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T20:46:01,222 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:46:01,222 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:46:01,222 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T20:46:01,223 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; 
major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T20:46:01,223 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:46:01,224 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:46:01,224 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T20:46:01,225 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T20:46:01,225 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:46:01,225 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:46:01,225 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T20:46:01,226 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/data/hbase/meta/1588230740 2024-11-11T20:46:01,226 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/data/hbase/meta/1588230740 2024-11-11T20:46:01,228 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T20:46:01,228 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T20:46:01,228 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
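The FlushLargeStoresPolicy line above states the fallback directly: when hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the table descriptor, the lower bound becomes the region's memstore flush heap size divided by the number of column families, which works out to 16 MiB here for hbase:meta and its four families (info, ns, rep_barrier, table); the master local region earlier reported 32 MiB the same way (flushSize=134217728 across four families). A small sketch of that arithmetic, using only numbers that appear in this log.

    // Sketch of the FlushLargeStoresPolicy fallback logged above:
    // lower bound = memstore flush size / number of column families,
    // applied when hbase.hregion.percolumnfamilyflush.size.lower.bound
    // is absent from the table descriptor.
    public class FlushLowerBoundSketch {
      static long lowerBound(long memstoreFlushSize, int numFamilies) {
        return memstoreFlushSize / numFamilies;
      }

      public static void main(String[] args) {
        // master local region: flushSize=134217728 over 4 families
        // (info, proc, rs, state) -> 33554432 bytes, matching the
        // "(32.0 M)" and flushSizeLowerBound=33554432 reported earlier.
        System.out.println(lowerBound(134_217_728L, 4));
      }
    }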
2024-11-11T20:46:01,229 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T20:46:01,231 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T20:46:01,232 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=788645, jitterRate=0.0028144270181655884}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T20:46:01,233 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731357961217Initializing all the Stores at 1731357961218 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731357961218Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731357961219 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731357961219Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731357961219Cleaning up temporary data from old regions at 1731357961228 (+9 ms)Region opened successfully at 1731357961233 (+5 ms) 2024-11-11T20:46:01,233 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T20:46:01,233 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T20:46:01,233 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T20:46:01,233 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T20:46:01,233 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T20:46:01,234 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T20:46:01,234 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731357961233Disabling compacts and flushes for region at 1731357961233Disabling writes for close at 1731357961233Writing 
region close event to WAL at 1731357961233Closed at 1731357961233 2024-11-11T20:46:01,235 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T20:46:01,235 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-11T20:46:01,235 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-11T20:46:01,237 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T20:46:01,237 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-11T20:46:01,275 INFO [RS:0;51ca66f7ee3c:40597 {}] regionserver.HRegionServer(746): ClusterId : c763c986-7925-4f4b-829e-dc08878bfb77 2024-11-11T20:46:01,275 DEBUG [RS:0;51ca66f7ee3c:40597 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-11T20:46:01,277 DEBUG [RS:0;51ca66f7ee3c:40597 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-11T20:46:01,277 DEBUG [RS:0;51ca66f7ee3c:40597 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-11T20:46:01,278 DEBUG [RS:0;51ca66f7ee3c:40597 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-11T20:46:01,279 DEBUG [RS:0;51ca66f7ee3c:40597 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6cee4c40, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=51ca66f7ee3c/172.17.0.2:0 2024-11-11T20:46:01,289 DEBUG [RS:0;51ca66f7ee3c:40597 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;51ca66f7ee3c:40597 2024-11-11T20:46:01,289 INFO [RS:0;51ca66f7ee3c:40597 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-11T20:46:01,289 INFO [RS:0;51ca66f7ee3c:40597 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-11T20:46:01,289 DEBUG [RS:0;51ca66f7ee3c:40597 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-11T20:46:01,289 INFO [RS:0;51ca66f7ee3c:40597 {}] regionserver.HRegionServer(2659): reportForDuty to master=51ca66f7ee3c,40891,1731357961024 with port=40597, startcode=1731357961062 2024-11-11T20:46:01,290 DEBUG [RS:0;51ca66f7ee3c:40597 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-11T20:46:01,291 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44597, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-11T20:46:01,292 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40891 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 51ca66f7ee3c,40597,1731357961062 2024-11-11T20:46:01,292 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40891 {}] master.ServerManager(517): Registering regionserver=51ca66f7ee3c,40597,1731357961062 2024-11-11T20:46:01,294 DEBUG [RS:0;51ca66f7ee3c:40597 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5 2024-11-11T20:46:01,294 DEBUG [RS:0;51ca66f7ee3c:40597 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36857 2024-11-11T20:46:01,294 DEBUG [RS:0;51ca66f7ee3c:40597 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-11T20:46:01,295 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40891-0x1003089479b0000, quorum=127.0.0.1:56616, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T20:46:01,295 DEBUG [RS:0;51ca66f7ee3c:40597 {}] zookeeper.ZKUtil(111): regionserver:40597-0x1003089479b0001, quorum=127.0.0.1:56616, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/51ca66f7ee3c,40597,1731357961062 2024-11-11T20:46:01,295 WARN [RS:0;51ca66f7ee3c:40597 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-11T20:46:01,296 INFO [RS:0;51ca66f7ee3c:40597 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T20:46:01,296 DEBUG [RS:0;51ca66f7ee3c:40597 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062 2024-11-11T20:46:01,296 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [51ca66f7ee3c,40597,1731357961062] 2024-11-11T20:46:01,299 INFO [RS:0;51ca66f7ee3c:40597 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-11T20:46:01,300 INFO [RS:0;51ca66f7ee3c:40597 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-11T20:46:01,301 INFO [RS:0;51ca66f7ee3c:40597 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T20:46:01,301 INFO [RS:0;51ca66f7ee3c:40597 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
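The PressureAwareCompactionThroughputController line above reports a 100 MB/s upper bound, a 50 MB/s lower bound, and a 60000 ms tuning period. Below is a hedged sketch of reading those bounds from configuration; the two key names are the commonly documented ones and do not appear in this log, so treat them as assumptions to confirm for the HBase version in use.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Sketch: the bounds behind the compaction throughput line above.
    // Key names are assumed (not printed in the log); the defaults passed to
    // getLong mirror the 100/50 MB/s values this run reports.
    public class CompactionThroughputSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        long higher = conf.getLong("hbase.hstore.compaction.throughput.higher.bound",
            100L * 1024 * 1024);
        long lower = conf.getLong("hbase.hstore.compaction.throughput.lower.bound",
            50L * 1024 * 1024);
        System.out.println("compaction throughput bounds (bytes/s): " + lower + " .. " + higher);
      }
    }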
2024-11-11T20:46:01,301 INFO [RS:0;51ca66f7ee3c:40597 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-11T20:46:01,302 INFO [RS:0;51ca66f7ee3c:40597 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-11T20:46:01,302 INFO [RS:0;51ca66f7ee3c:40597 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-11T20:46:01,302 DEBUG [RS:0;51ca66f7ee3c:40597 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:46:01,302 DEBUG [RS:0;51ca66f7ee3c:40597 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:46:01,302 DEBUG [RS:0;51ca66f7ee3c:40597 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:46:01,302 DEBUG [RS:0;51ca66f7ee3c:40597 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:46:01,303 DEBUG [RS:0;51ca66f7ee3c:40597 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:46:01,303 DEBUG [RS:0;51ca66f7ee3c:40597 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/51ca66f7ee3c:0, corePoolSize=2, maxPoolSize=2 2024-11-11T20:46:01,303 DEBUG [RS:0;51ca66f7ee3c:40597 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:46:01,303 DEBUG [RS:0;51ca66f7ee3c:40597 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:46:01,303 DEBUG [RS:0;51ca66f7ee3c:40597 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:46:01,303 DEBUG [RS:0;51ca66f7ee3c:40597 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:46:01,303 DEBUG [RS:0;51ca66f7ee3c:40597 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:46:01,303 DEBUG [RS:0;51ca66f7ee3c:40597 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:46:01,303 DEBUG [RS:0;51ca66f7ee3c:40597 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/51ca66f7ee3c:0, corePoolSize=3, maxPoolSize=3 2024-11-11T20:46:01,303 DEBUG [RS:0;51ca66f7ee3c:40597 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/51ca66f7ee3c:0, corePoolSize=3, maxPoolSize=3 2024-11-11T20:46:01,304 INFO [RS:0;51ca66f7ee3c:40597 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
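The executor services started above are thread pools described by two numbers, corePoolSize and maxPoolSize (1/1 for most region handlers, 2/2 for log replay, 3/3 for snapshot and flush operations). A minimal sketch with the plain JDK ThreadPoolExecutor shows what those two parameters mean; HBase's own ExecutorService wrapper is not used here, so this illustrates the pool semantics rather than the actual implementation.

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    // Sketch: a fixed-size pool like RS_SNAPSHOT_OPERATIONS (core=3, max=3).
    // With core == max and an unbounded queue, extra work queues up instead
    // of spawning additional threads.
    public class ExecutorPoolSketch {
      public static void main(String[] args) throws InterruptedException {
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
            3, 3,                                  // corePoolSize=3, maxPoolSize=3
            60L, TimeUnit.SECONDS,                 // idle timeout for core threads
            new LinkedBlockingQueue<Runnable>());  // unbounded work queue
        pool.allowCoreThreadTimeOut(true);         // let idle core threads exit
        pool.execute(() -> System.out.println("task ran on " + Thread.currentThread().getName()));
        pool.shutdown();
        pool.awaitTermination(10, TimeUnit.SECONDS);
      }
    }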
2024-11-11T20:46:01,304 INFO [RS:0;51ca66f7ee3c:40597 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T20:46:01,304 INFO [RS:0;51ca66f7ee3c:40597 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T20:46:01,304 INFO [RS:0;51ca66f7ee3c:40597 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-11T20:46:01,304 INFO [RS:0;51ca66f7ee3c:40597 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T20:46:01,304 INFO [RS:0;51ca66f7ee3c:40597 {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,40597,1731357961062-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T20:46:01,317 INFO [RS:0;51ca66f7ee3c:40597 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T20:46:01,318 INFO [RS:0;51ca66f7ee3c:40597 {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,40597,1731357961062-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T20:46:01,318 INFO [RS:0;51ca66f7ee3c:40597 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T20:46:01,318 INFO [RS:0;51ca66f7ee3c:40597 {}] regionserver.Replication(171): 51ca66f7ee3c,40597,1731357961062 started 2024-11-11T20:46:01,331 INFO [RS:0;51ca66f7ee3c:40597 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T20:46:01,331 INFO [RS:0;51ca66f7ee3c:40597 {}] regionserver.HRegionServer(1482): Serving as 51ca66f7ee3c,40597,1731357961062, RpcServer on 51ca66f7ee3c/172.17.0.2:40597, sessionid=0x1003089479b0001 2024-11-11T20:46:01,331 DEBUG [RS:0;51ca66f7ee3c:40597 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T20:46:01,331 DEBUG [RS:0;51ca66f7ee3c:40597 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 51ca66f7ee3c,40597,1731357961062 2024-11-11T20:46:01,331 DEBUG [RS:0;51ca66f7ee3c:40597 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '51ca66f7ee3c,40597,1731357961062' 2024-11-11T20:46:01,331 DEBUG [RS:0;51ca66f7ee3c:40597 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T20:46:01,332 DEBUG [RS:0;51ca66f7ee3c:40597 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T20:46:01,332 DEBUG [RS:0;51ca66f7ee3c:40597 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T20:46:01,332 DEBUG [RS:0;51ca66f7ee3c:40597 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T20:46:01,332 DEBUG [RS:0;51ca66f7ee3c:40597 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 51ca66f7ee3c,40597,1731357961062 2024-11-11T20:46:01,332 DEBUG [RS:0;51ca66f7ee3c:40597 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '51ca66f7ee3c,40597,1731357961062' 2024-11-11T20:46:01,332 DEBUG [RS:0;51ca66f7ee3c:40597 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-11T20:46:01,332 DEBUG 
[RS:0;51ca66f7ee3c:40597 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T20:46:01,333 DEBUG [RS:0;51ca66f7ee3c:40597 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T20:46:01,333 INFO [RS:0;51ca66f7ee3c:40597 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-11T20:46:01,333 INFO [RS:0;51ca66f7ee3c:40597 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-11T20:46:01,388 WARN [51ca66f7ee3c:40891 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-11T20:46:01,435 INFO [RS:0;51ca66f7ee3c:40597 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=51ca66f7ee3c%2C40597%2C1731357961062, suffix=, logDir=hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062, archiveDir=hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/oldWALs, maxLogs=32 2024-11-11T20:46:01,436 INFO [RS:0;51ca66f7ee3c:40597 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 51ca66f7ee3c%2C40597%2C1731357961062.1731357961436 2024-11-11T20:46:01,442 INFO [RS:0;51ca66f7ee3c:40597 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357961436 2024-11-11T20:46:01,443 DEBUG [RS:0;51ca66f7ee3c:40597 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39545:39545),(127.0.0.1/127.0.0.1:44403:44403)] 2024-11-11T20:46:01,638 DEBUG [51ca66f7ee3c:40891 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-11T20:46:01,638 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=51ca66f7ee3c,40597,1731357961062 2024-11-11T20:46:01,640 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 51ca66f7ee3c,40597,1731357961062, state=OPENING 2024-11-11T20:46:01,641 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-11T20:46:01,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40597-0x1003089479b0001, quorum=127.0.0.1:56616, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:46:01,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40891-0x1003089479b0000, quorum=127.0.0.1:56616, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:46:01,642 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T20:46:01,642 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T20:46:01,642 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T20:46:01,642 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=51ca66f7ee3c,40597,1731357961062}] 2024-11-11T20:46:01,797 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-11T20:46:01,801 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60485, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-11T20:46:01,809 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-11T20:46:01,809 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T20:46:01,812 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=51ca66f7ee3c%2C40597%2C1731357961062.meta, suffix=.meta, logDir=hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062, archiveDir=hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/oldWALs, maxLogs=32 2024-11-11T20:46:01,813 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 51ca66f7ee3c%2C40597%2C1731357961062.meta.1731357961813.meta 2024-11-11T20:46:01,820 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.meta.1731357961813.meta 2024-11-11T20:46:01,820 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44403:44403),(127.0.0.1/127.0.0.1:39545:39545)] 2024-11-11T20:46:01,821 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-11T20:46:01,822 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-11T20:46:01,822 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-11T20:46:01,822 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
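The entries above show hbase:meta being opened and the org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint coprocessor being loaded from the table's descriptor (HTD). For readers unfamiliar with how a coprocessor ends up in a descriptor, here is a minimal, hypothetical sketch using the public client API; the table name, column family and connection setup are illustrative assumptions, not the test's own code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.*;

    public class CoprocessorDescriptorSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();            // picks up hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Attach the same endpoint the log shows being loaded for hbase:meta,
          // here on a hypothetical user table purely for illustration.
          TableDescriptor td = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("example"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
              .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
              .build();
          admin.createTable(td);
        }
      }
    }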
2024-11-11T20:46:01,822 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-11T20:46:01,822 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T20:46:01,822 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-11T20:46:01,822 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-11T20:46:01,823 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T20:46:01,824 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T20:46:01,824 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:46:01,825 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:46:01,825 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T20:46:01,825 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T20:46:01,825 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:46:01,826 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:46:01,826 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T20:46:01,827 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T20:46:01,827 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:46:01,827 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:46:01,827 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T20:46:01,828 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T20:46:01,828 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:46:01,828 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
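The identical CompactionConfiguration line above is printed once per column family of hbase:meta (info, ns, rep_barrier, table) and simply echoes the effective compaction tuning: files [minFilesToCompact:3, maxFilesToCompact:10), ratio 1.2. As a hedged illustration only, the same knobs are normally driven by configuration properties; the key names below are the commonly documented ones and should be verified against your HBase version, they are not taken from this test.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Values mirror the defaults echoed in the log: files [3,10), ratio 1.2.
        conf.setInt("hbase.hstore.compaction.min", 3);      // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);     // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        System.out.println("compaction ratio = " + conf.getFloat("hbase.hstore.compaction.ratio", 1.2f));
      }
    }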
2024-11-11T20:46:01,828 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T20:46:01,829 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/data/hbase/meta/1588230740 2024-11-11T20:46:01,830 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/data/hbase/meta/1588230740 2024-11-11T20:46:01,831 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T20:46:01,831 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T20:46:01,832 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-11T20:46:01,833 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T20:46:01,834 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=859608, jitterRate=0.09304915368556976}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T20:46:01,834 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-11T20:46:01,834 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731357961822Writing region info on filesystem at 1731357961822Initializing all the Stores at 1731357961823 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731357961823Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731357961823Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731357961823Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731357961823Cleaning up temporary data from old regions at 1731357961831 (+8 ms)Running coprocessor post-open hooks at 1731357961834 (+3 ms)Region opened successfully at 1731357961834 2024-11-11T20:46:01,835 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731357961797 2024-11-11T20:46:01,837 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-11T20:46:01,837 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-11T20:46:01,838 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=51ca66f7ee3c,40597,1731357961062 2024-11-11T20:46:01,838 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:01,839 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 51ca66f7ee3c,40597,1731357961062, state=OPEN 2024-11-11T20:46:01,841 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40597-0x1003089479b0001, quorum=127.0.0.1:56616, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T20:46:01,841 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40891-0x1003089479b0000, quorum=127.0.0.1:56616, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T20:46:01,841 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=51ca66f7ee3c,40597,1731357961062 2024-11-11T20:46:01,841 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T20:46:01,841 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T20:46:01,844 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-11T20:46:01,844 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=51ca66f7ee3c,40597,1731357961062 in 199 msec 2024-11-11T20:46:01,847 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-11T20:46:01,847 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 609 msec 2024-11-11T20:46:01,847 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T20:46:01,847 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-11T20:46:01,849 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T20:46:01,849 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=51ca66f7ee3c,40597,1731357961062, seqNum=-1] 2024-11-11T20:46:01,849 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T20:46:01,850 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56871, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T20:46:01,851 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:01,856 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 663 msec 2024-11-11T20:46:01,856 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731357961856, completionTime=-1 2024-11-11T20:46:01,857 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-11T20:46:01,857 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
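The recurring RecoverLeaseFSUtils warnings above (and repeated below about once per second) come from a background Close-WAL-Writer retry loop targeting WALs under hdfs://localhost:46503, apparently left over from a previously shut-down mini cluster, which is why the reflective DFSClient.isFileClosed probe fails with "Filesystem closed" wrapped in an InvocationTargetException. As a minimal sketch of the underlying HDFS calls only (assuming a live DistributedFileSystem and a hypothetical WAL path; this is not HBase's implementation):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoverySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path wal = new Path(args[0]);                        // hypothetical WAL path
        FileSystem fs = wal.getFileSystem(conf);
        if (fs instanceof DistributedFileSystem) {
          DistributedFileSystem dfs = (DistributedFileSystem) fs;
          boolean recovered = dfs.recoverLease(wal);         // ask the NameNode to start lease recovery
          while (!recovered && !dfs.isFileClosed(wal)) {     // the call seen failing in the stack traces
            Thread.sleep(1000L);                             // back off, mirroring the ~1s cadence in the log
            recovered = dfs.recoverLease(wal);
          }
        }
      }
    }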
2024-11-11T20:46:01,859 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-11T20:46:01,859 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731358021859 2024-11-11T20:46:01,859 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731358081859 2024-11-11T20:46:01,859 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-11T20:46:01,859 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,40891,1731357961024-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T20:46:01,859 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,40891,1731357961024-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T20:46:01,859 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,40891,1731357961024-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T20:46:01,859 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-51ca66f7ee3c:40891, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T20:46:01,859 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-11T20:46:01,859 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-11T20:46:01,861 DEBUG [master/51ca66f7ee3c:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-11T20:46:01,863 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.774sec 2024-11-11T20:46:01,863 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-11T20:46:01,863 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-11T20:46:01,863 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-11T20:46:01,863 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-11T20:46:01,863 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-11T20:46:01,863 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,40891,1731357961024-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
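Each "Chore ... is enabled" line above corresponds to a ScheduledChore registered with the master's ChoreService at a fixed period. A hedged sketch of that pattern follows, with an invented chore name and a trivial Stoppable; ChoreService and ScheduledChore are HBase-internal classes, so treat this purely as an illustration of the pattern, not a supported client API.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) throws InterruptedException {
        Stoppable stopper = new Stoppable() {                // minimal stopper for the demo
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ChoreService service = new ChoreService("demo");
        service.scheduleChore(new ScheduledChore("DemoChore", stopper, 1000) {
          @Override protected void chore() {                 // runs once per period, like the chores in the log
            System.out.println("chore tick");
          }
        });
        Thread.sleep(3000L);
        service.shutdown();
      }
    }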
2024-11-11T20:46:01,863 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,40891,1731357961024-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-11T20:46:01,866 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-11T20:46:01,866 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-11T20:46:01,866 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,40891,1731357961024-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T20:46:01,875 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c9f0fbb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T20:46:01,875 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 51ca66f7ee3c,40891,-1 for getting cluster id 2024-11-11T20:46:01,875 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-11T20:46:01,877 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c763c986-7925-4f4b-829e-dc08878bfb77' 2024-11-11T20:46:01,877 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-11T20:46:01,877 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c763c986-7925-4f4b-829e-dc08878bfb77" 2024-11-11T20:46:01,877 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d80f041, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T20:46:01,877 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [51ca66f7ee3c,40891,-1] 2024-11-11T20:46:01,878 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-11T20:46:01,878 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T20:46:01,879 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37700, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-11T20:46:01,880 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10160004, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T20:46:01,880 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T20:46:01,881 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is 
[region=hbase:meta,,1.1588230740, hostname=51ca66f7ee3c,40597,1731357961062, seqNum=-1] 2024-11-11T20:46:01,881 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T20:46:01,883 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41916, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T20:46:01,884 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=51ca66f7ee3c,40891,1731357961024 2024-11-11T20:46:01,884 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:46:01,886 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-11T20:46:01,887 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-11T20:46:01,887 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-11T20:46:01,887 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-11T20:46:01,888 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 51ca66f7ee3c,40891,1731357961024 2024-11-11T20:46:01,888 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@72eeaaa4 2024-11-11T20:46:01,888 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-11T20:46:01,891 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37712, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-11T20:46:01,892 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40891 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-11T20:46:01,892 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40891 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
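The two TableDescriptorChecker warnings above fire because the test creates its table with a deliberately tiny max file size (786432 bytes) and memstore flush size (8192 bytes) so that flushes, splits and WAL rolls happen quickly. A hypothetical client-side equivalent is sketched below; it is not the test's actual code, only the table name and sizes are copied from the log for illustration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.*;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SmallTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptor td = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                  .setMaxVersions(1)
                  .build())
              .setMaxFileSize(786432L)           // would trigger the MAX_FILESIZE warning
              .setMemStoreFlushSize(8192L)       // would trigger the MEMSTORE_FLUSHSIZE warning
              .build();
          admin.createTable(td);                 // the master logs the resulting create request
        }
      }
    }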
2024-11-11T20:46:01,892 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40891 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T20:46:01,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40891 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-11T20:46:01,895 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-11T20:46:01,895 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:46:01,895 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40891 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-11T20:46:01,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40891 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T20:46:01,897 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-11T20:46:01,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36619 is added to blk_1073741835_1011 (size=395) 2024-11-11T20:46:01,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44023 is added to blk_1073741835_1011 (size=395) 2024-11-11T20:46:01,908 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => dffd65cf76b43e3fb9e3516b7496bf3e, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731357961891.dffd65cf76b43e3fb9e3516b7496bf3e.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5 2024-11-11T20:46:01,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44023 is added to blk_1073741836_1012 (size=78) 2024-11-11T20:46:01,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36619 is added to blk_1073741836_1012 (size=78) 2024-11-11T20:46:01,919 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731357961891.dffd65cf76b43e3fb9e3516b7496bf3e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T20:46:01,919 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing dffd65cf76b43e3fb9e3516b7496bf3e, disabling compactions & flushes 2024-11-11T20:46:01,919 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731357961891.dffd65cf76b43e3fb9e3516b7496bf3e. 2024-11-11T20:46:01,919 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731357961891.dffd65cf76b43e3fb9e3516b7496bf3e. 2024-11-11T20:46:01,919 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731357961891.dffd65cf76b43e3fb9e3516b7496bf3e. after waiting 0 ms 2024-11-11T20:46:01,919 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731357961891.dffd65cf76b43e3fb9e3516b7496bf3e. 2024-11-11T20:46:01,919 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731357961891.dffd65cf76b43e3fb9e3516b7496bf3e. 2024-11-11T20:46:01,919 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for dffd65cf76b43e3fb9e3516b7496bf3e: Waiting for close lock at 1731357961919Disabling compacts and flushes for region at 1731357961919Disabling writes for close at 1731357961919Writing region close event to WAL at 1731357961919Closed at 1731357961919 2024-11-11T20:46:01,921 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-11T20:46:01,921 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1731357961891.dffd65cf76b43e3fb9e3516b7496bf3e.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1731357961921"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731357961921"}]},"ts":"1731357961921"} 2024-11-11T20:46:01,924 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-11T20:46:01,925 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-11T20:46:01,925 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731357961925"}]},"ts":"1731357961925"} 2024-11-11T20:46:01,927 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-11T20:46:01,928 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=dffd65cf76b43e3fb9e3516b7496bf3e, ASSIGN}] 2024-11-11T20:46:01,929 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=dffd65cf76b43e3fb9e3516b7496bf3e, ASSIGN 2024-11-11T20:46:01,930 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=dffd65cf76b43e3fb9e3516b7496bf3e, ASSIGN; state=OFFLINE, location=51ca66f7ee3c,40597,1731357961062; forceNewPlan=false, retain=false 2024-11-11T20:46:02,081 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=dffd65cf76b43e3fb9e3516b7496bf3e, regionState=OPENING, regionLocation=51ca66f7ee3c,40597,1731357961062 2024-11-11T20:46:02,084 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=dffd65cf76b43e3fb9e3516b7496bf3e, ASSIGN because future has completed 2024-11-11T20:46:02,085 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure dffd65cf76b43e3fb9e3516b7496bf3e, server=51ca66f7ee3c,40597,1731357961062}] 2024-11-11T20:46:02,247 INFO [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1731357961891.dffd65cf76b43e3fb9e3516b7496bf3e. 
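The entries above dispatch OpenRegionProcedure pid=6 for dffd65cf76b43e3fb9e3516b7496bf3e to the single region server; the following entries show its info store being created and the region opening. Once the region is open, the test can drive WAL activity by writing to the table; a minimal, hypothetical client write (not the test's code) would look like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.*;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))) {
          Put put = new Put(Bytes.toBytes("row1"));
          put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          table.put(put);   // each edit is appended to the region server's WAL before hitting the memstore
        }
      }
    }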
2024-11-11T20:46:02,247 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => dffd65cf76b43e3fb9e3516b7496bf3e, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731357961891.dffd65cf76b43e3fb9e3516b7496bf3e.', STARTKEY => '', ENDKEY => ''} 2024-11-11T20:46:02,248 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart dffd65cf76b43e3fb9e3516b7496bf3e 2024-11-11T20:46:02,248 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731357961891.dffd65cf76b43e3fb9e3516b7496bf3e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T20:46:02,248 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for dffd65cf76b43e3fb9e3516b7496bf3e 2024-11-11T20:46:02,248 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for dffd65cf76b43e3fb9e3516b7496bf3e 2024-11-11T20:46:02,251 INFO [StoreOpener-dffd65cf76b43e3fb9e3516b7496bf3e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region dffd65cf76b43e3fb9e3516b7496bf3e 2024-11-11T20:46:02,253 INFO [StoreOpener-dffd65cf76b43e3fb9e3516b7496bf3e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region dffd65cf76b43e3fb9e3516b7496bf3e columnFamilyName info 2024-11-11T20:46:02,253 DEBUG [StoreOpener-dffd65cf76b43e3fb9e3516b7496bf3e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:46:02,254 INFO [StoreOpener-dffd65cf76b43e3fb9e3516b7496bf3e-1 {}] regionserver.HStore(327): Store=dffd65cf76b43e3fb9e3516b7496bf3e/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T20:46:02,254 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for dffd65cf76b43e3fb9e3516b7496bf3e 2024-11-11T20:46:02,256 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/data/default/TestLogRolling-testLogRollOnPipelineRestart/dffd65cf76b43e3fb9e3516b7496bf3e 2024-11-11T20:46:02,257 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/data/default/TestLogRolling-testLogRollOnPipelineRestart/dffd65cf76b43e3fb9e3516b7496bf3e 2024-11-11T20:46:02,258 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for dffd65cf76b43e3fb9e3516b7496bf3e 2024-11-11T20:46:02,258 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for dffd65cf76b43e3fb9e3516b7496bf3e 2024-11-11T20:46:02,261 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for dffd65cf76b43e3fb9e3516b7496bf3e 2024-11-11T20:46:02,263 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/data/default/TestLogRolling-testLogRollOnPipelineRestart/dffd65cf76b43e3fb9e3516b7496bf3e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T20:46:02,263 INFO [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened dffd65cf76b43e3fb9e3516b7496bf3e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=691577, jitterRate=-0.12061478197574615}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-11T20:46:02,263 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for dffd65cf76b43e3fb9e3516b7496bf3e 2024-11-11T20:46:02,264 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for dffd65cf76b43e3fb9e3516b7496bf3e: Running coprocessor pre-open hook at 1731357962248Writing region info on filesystem at 1731357962248Initializing all the Stores at 1731357962250 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731357962250Cleaning up temporary data from old regions at 1731357962258 (+8 ms)Running coprocessor post-open hooks at 1731357962263 (+5 ms)Region opened successfully at 1731357962264 (+1 ms) 2024-11-11T20:46:02,265 INFO [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1731357961891.dffd65cf76b43e3fb9e3516b7496bf3e., pid=6, masterSystemTime=1731357962240 2024-11-11T20:46:02,267 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnPipelineRestart,,1731357961891.dffd65cf76b43e3fb9e3516b7496bf3e. 2024-11-11T20:46:02,267 INFO [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1731357961891.dffd65cf76b43e3fb9e3516b7496bf3e. 2024-11-11T20:46:02,268 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=dffd65cf76b43e3fb9e3516b7496bf3e, regionState=OPEN, openSeqNum=2, regionLocation=51ca66f7ee3c,40597,1731357961062 2024-11-11T20:46:02,270 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure dffd65cf76b43e3fb9e3516b7496bf3e, server=51ca66f7ee3c,40597,1731357961062 because future has completed 2024-11-11T20:46:02,274 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-11T20:46:02,274 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure dffd65cf76b43e3fb9e3516b7496bf3e, server=51ca66f7ee3c,40597,1731357961062 in 187 msec 2024-11-11T20:46:02,277 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-11T20:46:02,277 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=dffd65cf76b43e3fb9e3516b7496bf3e, ASSIGN in 346 msec 2024-11-11T20:46:02,278 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-11T20:46:02,278 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731357962278"}]},"ts":"1731357962278"} 2024-11-11T20:46:02,280 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-11T20:46:02,281 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-11T20:46:02,283 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 389 msec 2024-11-11T20:46:02,839 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:02,852 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:03,840 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:03,853 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:04,842 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:04,854 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:05,843 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:05,856 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:06,844 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:06,858 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-11T20:46:07,328 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-11-11T20:46:07,352 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T20:46:07,352 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T20:46:07,352 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T20:46:07,353 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T20:46:07,353 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T20:46:07,353 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T20:46:07,357 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T20:46:07,357 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T20:46:07,357 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T20:46:07,359 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T20:46:07,364 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-11-11T20:46:07,364 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-11-11T20:46:07,365 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart
2024-11-11T20:46:07,365 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer
2024-11-11T20:46:07,365 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-11T20:46:07,365 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers
2024-11-11T20:46:07,365 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-11-11T20:46:07,366 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart'
2024-11-11T20:46:07,845 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:07,858 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T20:46:08,846 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:08,859 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:09,847 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T20:46:09,859 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:10,849 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:10,860 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T20:46:11,849 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:11,861 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-11T20:46:11,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40891 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-11T20:46:11,970 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed
2024-11-11T20:46:11,970 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100
2024-11-11T20:46:11,977 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart
2024-11-11T20:46:11,977 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1731357961891.dffd65cf76b43e3fb9e3516b7496bf3e.
2024-11-11T20:46:11,983 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1731357961891.dffd65cf76b43e3fb9e3516b7496bf3e., hostname=51ca66f7ee3c,40597,1731357961062, seqNum=2]
2024-11-11T20:46:12,850 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:12,862 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:13,852 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:13,863 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-11T20:46:13,986 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357961436
2024-11-11T20:46:13,987 WARN [ResponseProcessor for block BP-391654463-172.17.0.2-1731357960435:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-391654463-172.17.0.2-1731357960435:blk_1073741830_1006
java.io.EOFException: Unexpected EOF while trying to read response from server
	at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-11T20:46:13,987 WARN [ResponseProcessor for block BP-391654463-172.17.0.2-1731357960435:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-391654463-172.17.0.2-1731357960435:blk_1073741834_1010
java.io.EOFException: Unexpected EOF while trying to read response from server
	at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-11T20:46:13,987 WARN [ResponseProcessor for block BP-391654463-172.17.0.2-1731357960435:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-391654463-172.17.0.2-1731357960435:blk_1073741833_1009
java.io.IOException: Bad response ERROR for BP-391654463-172.17.0.2-1731357960435:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:44023,DS-a1a81eb6-4e25-4e82-b894-7c166deab86b,DISK]
	at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-11T20:46:13,987 WARN [DataStreamer for file /user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357961436 block BP-391654463-172.17.0.2-1731357960435:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-391654463-172.17.0.2-1731357960435:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36619,DS-d3abf4b2-410d-491f-b3e6-c3a5acc269cb,DISK], DatanodeInfoWithStorage[127.0.0.1:44023,DS-a1a81eb6-4e25-4e82-b894-7c166deab86b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44023,DS-a1a81eb6-4e25-4e82-b894-7c166deab86b,DISK]) is bad.
2024-11-11T20:46:13,987 WARN [DataStreamer for file /user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/MasterData/WALs/51ca66f7ee3c,40891,1731357961024/51ca66f7ee3c%2C40891%2C1731357961024.1731357961140 block BP-391654463-172.17.0.2-1731357960435:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-391654463-172.17.0.2-1731357960435:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44023,DS-a1a81eb6-4e25-4e82-b894-7c166deab86b,DISK], DatanodeInfoWithStorage[127.0.0.1:36619,DS-d3abf4b2-410d-491f-b3e6-c3a5acc269cb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44023,DS-a1a81eb6-4e25-4e82-b894-7c166deab86b,DISK]) is bad.
2024-11-11T20:46:13,987 WARN [DataStreamer for file /user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.meta.1731357961813.meta block BP-391654463-172.17.0.2-1731357960435:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-391654463-172.17.0.2-1731357960435:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44023,DS-a1a81eb6-4e25-4e82-b894-7c166deab86b,DISK], DatanodeInfoWithStorage[127.0.0.1:36619,DS-d3abf4b2-410d-491f-b3e6-c3a5acc269cb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44023,DS-a1a81eb6-4e25-4e82-b894-7c166deab86b,DISK]) is bad.
2024-11-11T20:46:13,987 WARN [PacketResponder: BP-391654463-172.17.0.2-1731357960435:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:44023] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run():
java.io.IOException: Connection reset by peer
	at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?]
	at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?]
	at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?]
	at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?]
	at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?]
	at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?]
	at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?]
	at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?]
	at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-11T20:46:13,987 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1966687860_22 at /127.0.0.1:36904 [Receiving block BP-391654463-172.17.0.2-1731357960435:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:44023:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36904 dst: /127.0.0.1:44023
java.nio.channels.ClosedChannelException: null
	at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
	at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
	at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
	at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
	at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
	at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-11T20:46:13,988 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-252317566_22 at /127.0.0.1:33908 [Receiving block BP-391654463-172.17.0.2-1731357960435:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:36619:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33908 dst: /127.0.0.1:36619
java.io.IOException: Premature EOF from inputStream
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-11T20:46:13,988 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1966687860_22 at /127.0.0.1:33958 [Receiving block BP-391654463-172.17.0.2-1731357960435:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:36619:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33958 dst: /127.0.0.1:36619
java.io.IOException: Premature EOF from inputStream
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-11T20:46:13,988 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1966687860_22 at /127.0.0.1:36900 [Receiving block BP-391654463-172.17.0.2-1731357960435:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:44023:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36900 dst: /127.0.0.1:44023 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T20:46:13,988 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-252317566_22 at /127.0.0.1:36864 [Receiving block BP-391654463-172.17.0.2-1731357960435:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:44023:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36864 dst: /127.0.0.1:44023 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T20:46:13,988 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1966687860_22 at /127.0.0.1:33932 [Receiving block BP-391654463-172.17.0.2-1731357960435:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:36619:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33932 dst: /127.0.0.1:36619 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T20:46:13,989 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@151f97c5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T20:46:13,990 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@232fa1ae{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T20:46:13,990 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T20:46:13,990 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@14a79ae9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T20:46:13,990 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1da660ce{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/hadoop.log.dir/,STOPPED} 2024-11-11T20:46:13,991 WARN [BP-391654463-172.17.0.2-1731357960435 heartbeating to localhost/127.0.0.1:36857 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T20:46:13,991 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-11T20:46:13,991 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T20:46:13,991 WARN [BP-391654463-172.17.0.2-1731357960435 heartbeating to localhost/127.0.0.1:36857 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-391654463-172.17.0.2-1731357960435 (Datanode Uuid 09dbf34a-6fb5-44ad-8b0f-667c3c8b31ab) service to localhost/127.0.0.1:36857 2024-11-11T20:46:13,992 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/cluster_adf983fe-fa26-345e-6630-2ba2317a1d11/data/data3/current/BP-391654463-172.17.0.2-1731357960435 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T20:46:13,992 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/cluster_adf983fe-fa26-345e-6630-2ba2317a1d11/data/data4/current/BP-391654463-172.17.0.2-1731357960435 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T20:46:13,992 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T20:46:14,005 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T20:46:14,008 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T20:46:14,009 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T20:46:14,009 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T20:46:14,009 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-11T20:46:14,009 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5ccd5f6b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/hadoop.log.dir/,AVAILABLE} 2024-11-11T20:46:14,009 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6b2d1260{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T20:46:14,102 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@371cd9e1{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/java.io.tmpdir/jetty-localhost-44703-hadoop-hdfs-3_4_1-tests_jar-_-any-12572213943686962369/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T20:46:14,103 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@644dfd01{HTTP/1.1, (http/1.1)}{localhost:44703} 2024-11-11T20:46:14,103 INFO [Time-limited test {}] server.Server(415): Started @163327ms 2024-11-11T20:46:14,104 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T20:46:14,122 WARN [ResponseProcessor for block BP-391654463-172.17.0.2-1731357960435:blk_1073741834_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-391654463-172.17.0.2-1731357960435:blk_1073741834_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:46:14,122 WARN [ResponseProcessor for block BP-391654463-172.17.0.2-1731357960435:blk_1073741833_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-391654463-172.17.0.2-1731357960435:blk_1073741833_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:46:14,122 WARN [ResponseProcessor for block BP-391654463-172.17.0.2-1731357960435:blk_1073741830_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-391654463-172.17.0.2-1731357960435:blk_1073741830_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:46:14,122 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1966687860_22 at /127.0.0.1:49324 [Receiving block BP-391654463-172.17.0.2-1731357960435:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:36619:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49324 dst: /127.0.0.1:36619 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T20:46:14,122 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1966687860_22 at /127.0.0.1:49326 [Receiving block BP-391654463-172.17.0.2-1731357960435:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:36619:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49326 dst: /127.0.0.1:36619 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T20:46:14,124 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@168478e8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T20:46:14,124 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-252317566_22 at /127.0.0.1:49308 [Receiving block BP-391654463-172.17.0.2-1731357960435:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:36619:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49308 dst: /127.0.0.1:36619 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T20:46:14,124 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@61549df4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T20:46:14,124 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T20:46:14,124 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2b810f17{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T20:46:14,125 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5fec76d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/hadoop.log.dir/,STOPPED} 2024-11-11T20:46:14,129 WARN [BP-391654463-172.17.0.2-1731357960435 heartbeating to localhost/127.0.0.1:36857 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T20:46:14,129 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T20:46:14,129 WARN [BP-391654463-172.17.0.2-1731357960435 heartbeating to localhost/127.0.0.1:36857 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-391654463-172.17.0.2-1731357960435 (Datanode Uuid 253fda30-9b38-4c72-8812-1987252b2841) service to localhost/127.0.0.1:36857 2024-11-11T20:46:14,129 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T20:46:14,129 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/cluster_adf983fe-fa26-345e-6630-2ba2317a1d11/data/data1/current/BP-391654463-172.17.0.2-1731357960435 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T20:46:14,130 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/cluster_adf983fe-fa26-345e-6630-2ba2317a1d11/data/data2/current/BP-391654463-172.17.0.2-1731357960435 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T20:46:14,130 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T20:46:14,141 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T20:46:14,145 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T20:46:14,148 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T20:46:14,148 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T20:46:14,148 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T20:46:14,149 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@bee8090{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/hadoop.log.dir/,AVAILABLE} 2024-11-11T20:46:14,149 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70d6804b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T20:46:14,181 WARN [Thread-1338 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T20:46:14,184 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3266023051ee5825 with lease ID 0x5dc5c1a0148ddbe0: from storage DS-a1a81eb6-4e25-4e82-b894-7c166deab86b node DatanodeRegistration(127.0.0.1:43863, datanodeUuid=09dbf34a-6fb5-44ad-8b0f-667c3c8b31ab, infoPort=34893, infoSecurePort=0, ipcPort=34615, storageInfo=lv=-57;cid=testClusterID;nsid=1105776182;c=1731357960435), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-11T20:46:14,184 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3266023051ee5825 with lease ID 0x5dc5c1a0148ddbe0: from storage DS-eb59f929-d355-4a5a-88ce-889b099919e3 node DatanodeRegistration(127.0.0.1:43863, datanodeUuid=09dbf34a-6fb5-44ad-8b0f-667c3c8b31ab, infoPort=34893, infoSecurePort=0, ipcPort=34615, storageInfo=lv=-57;cid=testClusterID;nsid=1105776182;c=1731357960435), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T20:46:14,245 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@156f820b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/java.io.tmpdir/jetty-localhost-43089-hadoop-hdfs-3_4_1-tests_jar-_-any-6820041684183536110/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T20:46:14,245 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1763b2d6{HTTP/1.1, (http/1.1)}{localhost:43089} 2024-11-11T20:46:14,245 INFO [Time-limited test {}] server.Server(415): Started @163470ms 2024-11-11T20:46:14,246 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T20:46:14,309 WARN [Thread-1369 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T20:46:14,311 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xac2be289e1d6ed97 with lease ID 0x5dc5c1a0148ddbe1: from storage DS-d3abf4b2-410d-491f-b3e6-c3a5acc269cb node DatanodeRegistration(127.0.0.1:38363, datanodeUuid=253fda30-9b38-4c72-8812-1987252b2841, infoPort=37001, infoSecurePort=0, ipcPort=40925, storageInfo=lv=-57;cid=testClusterID;nsid=1105776182;c=1731357960435), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-11T20:46:14,311 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xac2be289e1d6ed97 with lease ID 0x5dc5c1a0148ddbe1: from storage DS-b069680b-e99f-4791-9618-c00cf1a30fc7 node DatanodeRegistration(127.0.0.1:38363, datanodeUuid=253fda30-9b38-4c72-8812-1987252b2841, infoPort=37001, infoSecurePort=0, ipcPort=40925, storageInfo=lv=-57;cid=testClusterID;nsid=1105776182;c=1731357960435), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T20:46:14,854 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T20:46:14,864 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:15,263 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-11T20:46:15,267 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-11T20:46:15,270 ERROR [FSHLog-0-hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5-prefix:51ca66f7ee3c,40597,1731357961062 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36619,DS-d3abf4b2-410d-491f-b3e6-c3a5acc269cb,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T20:46:15,270 WARN [FSHLog-0-hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5-prefix:51ca66f7ee3c,40597,1731357961062 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36619,DS-d3abf4b2-410d-491f-b3e6-c3a5acc269cb,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:46:15,270 DEBUG [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 51ca66f7ee3c%2C40597%2C1731357961062:(num 1731357961436) roll requested 2024-11-11T20:46:15,271 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 51ca66f7ee3c%2C40597%2C1731357961062.1731357975270 2024-11-11T20:46:15,277 DEBUG [regionserver/51ca66f7ee3c:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357961436 newFile=hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357975270 2024-11-11T20:46:15,277 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:15,277 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:15,277 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:15,277 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:15,278 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:15,278 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357961436 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357975270 2024-11-11T20:46:15,278 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36619,DS-d3abf4b2-410d-491f-b3e6-c3a5acc269cb,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T20:46:15,278 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36619,DS-d3abf4b2-410d-491f-b3e6-c3a5acc269cb,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:46:15,278 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357961436 2024-11-11T20:46:15,279 DEBUG [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37001:37001),(127.0.0.1/127.0.0.1:34893:34893)] 2024-11-11T20:46:15,279 DEBUG [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357961436 is not closed yet, will try archiving it next time 2024-11-11T20:46:15,279 WARN [IPC Server handler 2 on default port 36857 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357961436 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1014 2024-11-11T20:46:15,279 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357961436 after 1ms 2024-11-11T20:46:15,855 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:15,866 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T20:46:16,186 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1014: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-11T20:46:16,856 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:16,867 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:17,283 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-11T20:46:17,858 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:17,868 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:18,859 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:18,870 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:19,281 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357961436 after 4003ms 2024-11-11T20:46:19,287 WARN [ResponseProcessor for block BP-391654463-172.17.0.2-1731357960435:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-391654463-172.17.0.2-1731357960435:blk_1073741837_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:46:19,288 WARN [DataStreamer for file /user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357975270 block BP-391654463-172.17.0.2-1731357960435:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-391654463-172.17.0.2-1731357960435:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38363,DS-d3abf4b2-410d-491f-b3e6-c3a5acc269cb,DISK], DatanodeInfoWithStorage[127.0.0.1:43863,DS-a1a81eb6-4e25-4e82-b894-7c166deab86b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38363,DS-d3abf4b2-410d-491f-b3e6-c3a5acc269cb,DISK]) is bad. 
2024-11-11T20:46:19,289 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1966687860_22 at /127.0.0.1:47130 [Receiving block BP-391654463-172.17.0.2-1731357960435:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:38363:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47130 dst: /127.0.0.1:38363 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T20:46:19,289 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1966687860_22 at /127.0.0.1:47084 [Receiving block BP-391654463-172.17.0.2-1731357960435:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:43863:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47084 dst: /127.0.0.1:43863 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T20:46:19,295 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@156f820b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T20:46:19,295 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1763b2d6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T20:46:19,295 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T20:46:19,295 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70d6804b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T20:46:19,295 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@bee8090{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/hadoop.log.dir/,STOPPED} 2024-11-11T20:46:19,297 WARN [BP-391654463-172.17.0.2-1731357960435 heartbeating to localhost/127.0.0.1:36857 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T20:46:19,297 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T20:46:19,297 WARN [BP-391654463-172.17.0.2-1731357960435 heartbeating to localhost/127.0.0.1:36857 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-391654463-172.17.0.2-1731357960435 (Datanode Uuid 253fda30-9b38-4c72-8812-1987252b2841) service to localhost/127.0.0.1:36857 2024-11-11T20:46:19,297 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T20:46:19,297 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/cluster_adf983fe-fa26-345e-6630-2ba2317a1d11/data/data1/current/BP-391654463-172.17.0.2-1731357960435 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T20:46:19,298 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/cluster_adf983fe-fa26-345e-6630-2ba2317a1d11/data/data2/current/BP-391654463-172.17.0.2-1731357960435 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T20:46:19,298 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T20:46:19,304 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T20:46:19,309 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T20:46:19,310 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T20:46:19,310 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T20:46:19,310 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T20:46:19,311 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3e3b369c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/hadoop.log.dir/,AVAILABLE} 2024-11-11T20:46:19,311 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3d61cf28{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T20:46:19,414 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3a397072{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/java.io.tmpdir/jetty-localhost-36897-hadoop-hdfs-3_4_1-tests_jar-_-any-3179491647021138777/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T20:46:19,414 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5bc55163{HTTP/1.1, 
(http/1.1)}{localhost:36897} 2024-11-11T20:46:19,414 INFO [Time-limited test {}] server.Server(415): Started @168639ms 2024-11-11T20:46:19,415 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T20:46:19,433 WARN [ResponseProcessor for block BP-391654463-172.17.0.2-1731357960435:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-391654463-172.17.0.2-1731357960435:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:46:19,434 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1966687860_22 at /127.0.0.1:44574 [Receiving block BP-391654463-172.17.0.2-1731357960435:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:43863:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44574 dst: /127.0.0.1:43863 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T20:46:19,462 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@371cd9e1{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T20:46:19,463 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@644dfd01{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T20:46:19,463 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T20:46:19,463 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6b2d1260{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T20:46:19,463 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5ccd5f6b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/hadoop.log.dir/,STOPPED} 2024-11-11T20:46:19,464 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-11T20:46:19,464 WARN [BP-391654463-172.17.0.2-1731357960435 heartbeating to localhost/127.0.0.1:36857 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T20:46:19,464 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T20:46:19,464 WARN [BP-391654463-172.17.0.2-1731357960435 heartbeating to localhost/127.0.0.1:36857 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-391654463-172.17.0.2-1731357960435 (Datanode Uuid 09dbf34a-6fb5-44ad-8b0f-667c3c8b31ab) service to localhost/127.0.0.1:36857 2024-11-11T20:46:19,465 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/cluster_adf983fe-fa26-345e-6630-2ba2317a1d11/data/data3/current/BP-391654463-172.17.0.2-1731357960435 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T20:46:19,465 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/cluster_adf983fe-fa26-345e-6630-2ba2317a1d11/data/data4/current/BP-391654463-172.17.0.2-1731357960435 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T20:46:19,465 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T20:46:19,477 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T20:46:19,480 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T20:46:19,481 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T20:46:19,481 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T20:46:19,482 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-11T20:46:19,482 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@29d5ccf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/hadoop.log.dir/,AVAILABLE} 2024-11-11T20:46:19,482 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7939cb3e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T20:46:19,510 WARN [Thread-1412 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-11T20:46:19,512 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc1b8bf15082bcaed with lease ID 0x5dc5c1a0148ddbe2: from storage DS-d3abf4b2-410d-491f-b3e6-c3a5acc269cb node DatanodeRegistration(127.0.0.1:35911, datanodeUuid=253fda30-9b38-4c72-8812-1987252b2841, infoPort=44773, infoSecurePort=0, ipcPort=46093, storageInfo=lv=-57;cid=testClusterID;nsid=1105776182;c=1731357960435), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-11T20:46:19,512 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc1b8bf15082bcaed with lease ID 0x5dc5c1a0148ddbe2: from storage DS-b069680b-e99f-4791-9618-c00cf1a30fc7 node DatanodeRegistration(127.0.0.1:35911, datanodeUuid=253fda30-9b38-4c72-8812-1987252b2841, infoPort=44773, infoSecurePort=0, ipcPort=46093, storageInfo=lv=-57;cid=testClusterID;nsid=1105776182;c=1731357960435), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T20:46:19,591 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1dad3af2{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/java.io.tmpdir/jetty-localhost-36491-hadoop-hdfs-3_4_1-tests_jar-_-any-6240213382402417550/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T20:46:19,592 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4307cd3{HTTP/1.1, (http/1.1)}{localhost:36491} 2024-11-11T20:46:19,592 INFO [Time-limited test {}] server.Server(415): Started @168816ms 2024-11-11T20:46:19,594 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-11T20:46:19,664 WARN [Thread-1443 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-11T20:46:19,666 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x35da962fec924b63 with lease ID 0x5dc5c1a0148ddbe3: from storage DS-a1a81eb6-4e25-4e82-b894-7c166deab86b node DatanodeRegistration(127.0.0.1:42491, datanodeUuid=09dbf34a-6fb5-44ad-8b0f-667c3c8b31ab, infoPort=36269, infoSecurePort=0, ipcPort=34937, storageInfo=lv=-57;cid=testClusterID;nsid=1105776182;c=1731357960435), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T20:46:19,666 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x35da962fec924b63 with lease ID 0x5dc5c1a0148ddbe3: from storage DS-eb59f929-d355-4a5a-88ce-889b099919e3 node DatanodeRegistration(127.0.0.1:42491, datanodeUuid=09dbf34a-6fb5-44ad-8b0f-667c3c8b31ab, infoPort=36269, infoSecurePort=0, ipcPort=34937, storageInfo=lv=-57;cid=testClusterID;nsid=1105776182;c=1731357960435), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T20:46:19,861 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T20:46:19,871 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:20,610 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-11T20:46:20,613 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-11T20:46:20,615 ERROR [FSHLog-0-hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5-prefix:51ca66f7ee3c,40597,1731357961062 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43863,DS-a1a81eb6-4e25-4e82-b894-7c166deab86b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T20:46:20,615 WARN [FSHLog-0-hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5-prefix:51ca66f7ee3c,40597,1731357961062 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43863,DS-a1a81eb6-4e25-4e82-b894-7c166deab86b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:46:20,615 DEBUG [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 51ca66f7ee3c%2C40597%2C1731357961062:(num 1731357975270) roll requested 2024-11-11T20:46:20,615 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 51ca66f7ee3c%2C40597%2C1731357961062.1731357980615 2024-11-11T20:46:20,623 DEBUG [regionserver/51ca66f7ee3c:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357975270 newFile=hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357980615 2024-11-11T20:46:20,623 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:20,623 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:20,623 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:20,624 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:20,624 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:20,624 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357975270 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357980615 2024-11-11T20:46:20,624 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43863,DS-a1a81eb6-4e25-4e82-b894-7c166deab86b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T20:46:20,624 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43863,DS-a1a81eb6-4e25-4e82-b894-7c166deab86b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:46:20,625 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357975270 2024-11-11T20:46:20,625 DEBUG [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44773:44773),(127.0.0.1/127.0.0.1:36269:36269)] 2024-11-11T20:46:20,625 DEBUG [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357975270 is not closed yet, will try archiving it next time 2024-11-11T20:46:20,625 WARN [IPC Server handler 1 on default port 36857 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357975270 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-11T20:46:20,626 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357975270 after 1ms 2024-11-11T20:46:20,861 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:20,871 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T20:46:21,862 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:21,872 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:22,515 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-11T20:46:22,627 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 51ca66f7ee3c%2C40597%2C1731357961062.1731357982627 2024-11-11T20:46:22,636 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357980615 newFile=hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357982627 2024-11-11T20:46:22,636 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:22,637 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:22,637 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:22,637 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:22,637 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:22,638 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357980615 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357982627 2024-11-11T20:46:22,640 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44773:44773),(127.0.0.1/127.0.0.1:36269:36269)] 2024-11-11T20:46:22,640 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357975270 is not closed yet, will try archiving it next time 2024-11-11T20:46:22,640 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357980615 is not closed yet, will try archiving it next time 2024-11-11T20:46:22,640 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357961436 2024-11-11T20:46:22,641 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357961436 2024-11-11T20:46:22,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741838_1019 (size=1264) 2024-11-11T20:46:22,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35911 is added to blk_1073741838_1019 (size=1264) 2024-11-11T20:46:22,642 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357961436 after 0ms 2024-11-11T20:46:22,642 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL 
/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357961436 2024-11-11T20:46:22,642 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357975270 is not closed yet, will try archiving it next time 2024-11-11T20:46:22,651 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1731357962264/Put/vlen=218/seqid=0] 2024-11-11T20:46:22,651 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1731357971984/Put/vlen=1045/seqid=0] 2024-11-11T20:46:22,651 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357961436 2024-11-11T20:46:22,651 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357975270 2024-11-11T20:46:22,651 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357975270 2024-11-11T20:46:22,652 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357975270 after 1ms 2024-11-11T20:46:22,652 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357975270 2024-11-11T20:46:22,655 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1731357975269/Put/vlen=1045/seqid=0] 2024-11-11T20:46:22,655 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1731357977284/Put/vlen=1045/seqid=0] 2024-11-11T20:46:22,655 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357975270 2024-11-11T20:46:22,655 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357980615 2024-11-11T20:46:22,655 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357980615 2024-11-11T20:46:22,656 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357980615 after 1ms 2024-11-11T20:46:22,656 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL 
/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357980615 2024-11-11T20:46:22,659 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1731357980614/Put/vlen=1045/seqid=0] 2024-11-11T20:46:22,659 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357982627 2024-11-11T20:46:22,659 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357982627 2024-11-11T20:46:22,660 WARN [IPC Server handler 2 on default port 36857 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357982627 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-11T20:46:22,660 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357982627 after 1ms 2024-11-11T20:46:22,864 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:22,873 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:23,517 WARN [ResponseProcessor for block BP-391654463-172.17.0.2-1731357960435:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-391654463-172.17.0.2-1731357960435:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
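[Editor's note] The RecoverLeaseFSUtils entries above follow the usual HDFS lease-recovery pattern: ask the NameNode to recover the lease on the old WAL file, then poll isFileClosed() until the previous writer's lease is released (the "Filesystem closed" failures come from a DFSClient that has already been shut down, so the poll itself cannot run). Below is a minimal sketch of that pattern against a plain DistributedFileSystem; it is an illustration only, not the RecoverLeaseFSUtils implementation, and the 4-second retry pause is a placeholder.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoverySketch {

  /** Recover the HDFS lease on an old WAL file and wait until it is reported closed. */
  public static void recoverLease(Configuration conf, Path walPath) throws Exception {
    FileSystem fs = walPath.getFileSystem(conf);
    if (!(fs instanceof DistributedFileSystem)) {
      return; // lease recovery only applies to HDFS
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    long start = System.currentTimeMillis();
    // recoverLease() returns true if the file is already closed or recovery finished
    // immediately; otherwise poll isFileClosed() until the NameNode reports that the
    // pending block recovery (e.g. "RecoveryId = 1022" above) has completed.
    boolean recovered = dfs.recoverLease(walPath);
    for (int attempt = 0; !recovered; attempt++) {
      Thread.sleep(4000L); // placeholder pause between attempts
      recovered = dfs.isFileClosed(walPath);
      System.out.println("lease recovery attempt=" + attempt + " recovered=" + recovered
          + " after " + (System.currentTimeMillis() - start) + "ms on " + walPath);
    }
  }
}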
2024-11-11T20:46:23,517 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-252317566_22 at /127.0.0.1:49536 [Receiving block BP-391654463-172.17.0.2-1731357960435:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:35911:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49536 dst: /127.0.0.1:35911 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:35911 remote=/127.0.0.1:49536]. Total timeout mills is 60000, 59118 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T20:46:23,518 WARN [DataStreamer for file /user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357982627 block BP-391654463-172.17.0.2-1731357960435:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-391654463-172.17.0.2-1731357960435:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35911,DS-d3abf4b2-410d-491f-b3e6-c3a5acc269cb,DISK], DatanodeInfoWithStorage[127.0.0.1:42491,DS-a1a81eb6-4e25-4e82-b894-7c166deab86b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35911,DS-d3abf4b2-410d-491f-b3e6-c3a5acc269cb,DISK]) is bad. 
2024-11-11T20:46:23,518 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-252317566_22 at /127.0.0.1:57062 [Receiving block BP-391654463-172.17.0.2-1731357960435:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:42491:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57062 dst: /127.0.0.1:42491 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T20:46:23,525 WARN [DataStreamer for file /user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357982627 block BP-391654463-172.17.0.2-1731357960435:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-391654463-172.17.0.2-1731357960435:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at 
org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] 
at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:46:23,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35911 is added to blk_1073741839_1022 (size=85) 2024-11-11T20:46:23,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741839_1022 (size=85) 2024-11-11T20:46:23,865 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:23,874 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T20:46:24,627 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357975270 after 4002ms 2024-11-11T20:46:24,867 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:24,875 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:25,869 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:25,876 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T20:46:26,662 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357982627 after 4003ms 2024-11-11T20:46:26,662 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357982627 2024-11-11T20:46:26,671 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357982627 2024-11-11T20:46:26,671 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing dffd65cf76b43e3fb9e3516b7496bf3e 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-11T20:46:26,672 ERROR [FSHLog-0-hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5-prefix:51ca66f7ee3c,40597,1731357961062 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-391654463-172.17.0.2-1731357960435:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:46:26,673 WARN [FSHLog-0-hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5-prefix:51ca66f7ee3c,40597,1731357961062 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-391654463-172.17.0.2-1731357960435:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:46:26,674 DEBUG [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 51ca66f7ee3c%2C40597%2C1731357961062:(num 1731357982627) roll requested 2024-11-11T20:46:26,674 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 51ca66f7ee3c%2C40597%2C1731357961062.1731357986674 2024-11-11T20:46:26,681 DEBUG [regionserver/51ca66f7ee3c:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357982627 newFile=hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357986674 2024-11-11T20:46:26,681 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:26,681 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:26,681 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:26,681 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:26,681 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:26,681 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357982627 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357986674 2024-11-11T20:46:26,681 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-391654463-172.17.0.2-1731357960435:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:46:26,682 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-391654463-172.17.0.2-1731357960435:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
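The "close old writer failed" WARN above wraps a NameNode-side error: updateBlockForPipeline is rejected because the block is already UNDER_RECOVERY rather than UNDER_CONSTRUCTION, and the exception travels back to the WAL writer through the RetryInvocationHandler and HFileSystem proxy frames repeated in the trace. A minimal sketch, assuming only the public Hadoop IPC API, of how a caller can unwrap such an org.apache.hadoop.ipc.RemoteException to see the server-side exception class; the helper class and method below are illustrative, not HBase's actual handling.

import java.io.IOException;
import org.apache.hadoop.ipc.RemoteException;

/**
 * Illustrative helper: surface the NameNode-side exception (for example the
 * "Unexpected BlockUCState" rejection above) hidden inside an RPC RemoteException.
 */
public final class RemoteExceptionSketch {

  /** Returns the server-side exception if the failure arrived over RPC, otherwise the input. */
  static IOException unwrapIfRemote(IOException e) {
    if (e instanceof RemoteException) {
      RemoteException re = (RemoteException) e;
      // getClassName() is the fully qualified class of the exception thrown on the NameNode.
      System.err.println("NameNode threw: " + re.getClassName());
      return re.unwrapRemoteException();
    }
    return e;
  }

  private RemoteExceptionSketch() {
  }
}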
2024-11-11T20:46:26,683 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357982627 2024-11-11T20:46:26,683 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357982627 after 0ms 2024-11-11T20:46:26,685 DEBUG [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36269:36269),(127.0.0.1/127.0.0.1:44773:44773)] 2024-11-11T20:46:26,685 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.1731357982627 to hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/oldWALs/51ca66f7ee3c%2C40597%2C1731357961062.1731357982627 2024-11-11T20:46:26,702 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/data/default/TestLogRolling-testLogRollOnPipelineRestart/dffd65cf76b43e3fb9e3516b7496bf3e/.tmp/info/46100b674c3e4b88bab3e03c4ef55153 is 1080, key is row1002/info:/1731357971984/Put/seqid=0 2024-11-11T20:46:26,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35911 is added to blk_1073741841_1024 (size=9270) 2024-11-11T20:46:26,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741841_1024 (size=9270) 2024-11-11T20:46:26,708 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/data/default/TestLogRolling-testLogRollOnPipelineRestart/dffd65cf76b43e3fb9e3516b7496bf3e/.tmp/info/46100b674c3e4b88bab3e03c4ef55153 2024-11-11T20:46:26,714 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/data/default/TestLogRolling-testLogRollOnPipelineRestart/dffd65cf76b43e3fb9e3516b7496bf3e/.tmp/info/46100b674c3e4b88bab3e03c4ef55153 as hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/data/default/TestLogRolling-testLogRollOnPipelineRestart/dffd65cf76b43e3fb9e3516b7496bf3e/info/46100b674c3e4b88bab3e03c4ef55153 2024-11-11T20:46:26,721 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/data/default/TestLogRolling-testLogRollOnPipelineRestart/dffd65cf76b43e3fb9e3516b7496bf3e/info/46100b674c3e4b88bab3e03c4ef55153, entries=4, sequenceid=8, filesize=9.1 K 2024-11-11T20:46:26,722 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for dffd65cf76b43e3fb9e3516b7496bf3e in 51ms, sequenceid=8, compaction requested=false 2024-11-11T20:46:26,722 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 
dffd65cf76b43e3fb9e3516b7496bf3e: 2024-11-11T20:46:26,722 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-11T20:46:26,722 ERROR [FSHLog-0-hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5-prefix:51ca66f7ee3c,40597,1731357961062.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36619,DS-d3abf4b2-410d-491f-b3e6-c3a5acc269cb,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:46:26,723 WARN [FSHLog-0-hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5-prefix:51ca66f7ee3c,40597,1731357961062.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36619,DS-d3abf4b2-410d-491f-b3e6-c3a5acc269cb,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:46:26,723 DEBUG [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 51ca66f7ee3c%2C40597%2C1731357961062.meta:.meta(num 1731357961813) roll requested 2024-11-11T20:46:26,723 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 51ca66f7ee3c%2C40597%2C1731357961062.meta.1731357986723.meta 2024-11-11T20:46:26,728 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:26,728 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:26,728 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:26,728 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:26,728 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:26,728 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.meta.1731357961813.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.meta.1731357986723.meta 2024-11-11T20:46:26,728 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36619,DS-d3abf4b2-410d-491f-b3e6-c3a5acc269cb,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:46:26,729 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36619,DS-d3abf4b2-410d-491f-b3e6-c3a5acc269cb,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:46:26,729 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.meta.1731357961813.meta 2024-11-11T20:46:26,729 WARN [IPC Server handler 4 on default port 36857 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.meta.1731357961813.meta has not been closed. Lease recovery is in progress. 
RecoveryId = 1026 for block blk_1073741834_1015 2024-11-11T20:46:26,729 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.meta.1731357961813.meta after 0ms 2024-11-11T20:46:26,732 DEBUG [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44773:44773),(127.0.0.1/127.0.0.1:36269:36269)] 2024-11-11T20:46:26,732 DEBUG [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.meta.1731357961813.meta is not closed yet, will try archiving it next time 2024-11-11T20:46:26,747 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/data/hbase/meta/1588230740/.tmp/info/94a0a9dd6e7a410ba91ce268679266ba is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1731357961891.dffd65cf76b43e3fb9e3516b7496bf3e./info:regioninfo/1731357962268/Put/seqid=0 2024-11-11T20:46:26,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741843_1027 (size=7125) 2024-11-11T20:46:26,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35911 is added to blk_1073741843_1027 (size=7125) 2024-11-11T20:46:26,752 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/data/hbase/meta/1588230740/.tmp/info/94a0a9dd6e7a410ba91ce268679266ba 2024-11-11T20:46:26,771 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/data/hbase/meta/1588230740/.tmp/ns/bffee7f0ce9d4a05954fe01e0bcf988c is 43, key is default/ns:d/1731357961851/Put/seqid=0 2024-11-11T20:46:26,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35911 is added to blk_1073741844_1028 (size=5153) 2024-11-11T20:46:26,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741844_1028 (size=5153) 2024-11-11T20:46:26,776 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/data/hbase/meta/1588230740/.tmp/ns/bffee7f0ce9d4a05954fe01e0bcf988c 2024-11-11T20:46:26,794 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/data/hbase/meta/1588230740/.tmp/table/1276f61bc5a54ce5a220c9336365a51e is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1731357962278/Put/seqid=0 2024-11-11T20:46:26,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741845_1029 (size=5438) 2024-11-11T20:46:26,798 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35911 is added to blk_1073741845_1029 (size=5438) 2024-11-11T20:46:26,799 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/data/hbase/meta/1588230740/.tmp/table/1276f61bc5a54ce5a220c9336365a51e 2024-11-11T20:46:26,804 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/data/hbase/meta/1588230740/.tmp/info/94a0a9dd6e7a410ba91ce268679266ba as hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/data/hbase/meta/1588230740/info/94a0a9dd6e7a410ba91ce268679266ba 2024-11-11T20:46:26,810 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/data/hbase/meta/1588230740/info/94a0a9dd6e7a410ba91ce268679266ba, entries=10, sequenceid=11, filesize=7.0 K 2024-11-11T20:46:26,811 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/data/hbase/meta/1588230740/.tmp/ns/bffee7f0ce9d4a05954fe01e0bcf988c as hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/data/hbase/meta/1588230740/ns/bffee7f0ce9d4a05954fe01e0bcf988c 2024-11-11T20:46:26,816 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/data/hbase/meta/1588230740/ns/bffee7f0ce9d4a05954fe01e0bcf988c, entries=2, sequenceid=11, filesize=5.0 K 2024-11-11T20:46:26,817 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/data/hbase/meta/1588230740/.tmp/table/1276f61bc5a54ce5a220c9336365a51e as hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/data/hbase/meta/1588230740/table/1276f61bc5a54ce5a220c9336365a51e 2024-11-11T20:46:26,823 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/data/hbase/meta/1588230740/table/1276f61bc5a54ce5a220c9336365a51e, entries=2, sequenceid=11, filesize=5.3 K 2024-11-11T20:46:26,824 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 102ms, sequenceid=11, compaction requested=false 2024-11-11T20:46:26,824 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-11T20:46:26,829 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-11T20:46:26,829 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
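The records above show the flush path end to end: DefaultStoreFlusher writes each column family to a .tmp hfile, HRegionFileSystem commits it into the family directory, and HStore reports the added file and its size before the region's flush journal closes. A minimal client-side sketch that asks the cluster to flush the same table; the connection bootstrap is generic, and only the table name is taken from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

/** Illustrative sketch: request a memstore flush for the table used by this test. */
public final class FlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Table name copied from the test in this log; any existing table works the same way.
      admin.flush(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"));
    }
  }
}

Flushing this way produces the same .tmp-write-then-commit sequence on the region server that the DEBUG/INFO lines above record.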
2024-11-11T20:46:26,830 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T20:46:26,830 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T20:46:26,830 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T20:46:26,830 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
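The Call stack record above shows where the shutdown originates: JUnit's RunAfters invokes AbstractTestLogRolling.tearDown, which calls HBaseTestingUtil.shutdownMiniCluster and closes the shared async connection. A minimal sketch of that test lifecycle, assuming the usual startMiniCluster()/shutdownMiniCluster() pair on HBaseTestingUtil as the stack suggests; the class and its empty test body are placeholders, not the real AbstractTestLogRolling.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

/** Illustrative JUnit 4 lifecycle around an HBase mini cluster. */
public class MiniClusterLifecycleSketch {
  private final HBaseTestingUtil util = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // Assumed API: starts in-process ZooKeeper, HDFS and HBase for the test.
    util.startMiniCluster();
  }

  @After
  public void tearDown() throws Exception {
    // Triggers the shutdown sequence seen in the surrounding log records.
    util.shutdownMiniCluster();
  }

  @Test
  public void placeholder() throws Exception {
    // The real tests roll WALs and restart datanode pipelines here.
  }
}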
2024-11-11T20:46:26,830 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-11T20:46:26,830 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1844858994, stopped=false 2024-11-11T20:46:26,830 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=51ca66f7ee3c,40891,1731357961024 2024-11-11T20:46:26,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40891-0x1003089479b0000, quorum=127.0.0.1:56616, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T20:46:26,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40597-0x1003089479b0001, quorum=127.0.0.1:56616, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T20:46:26,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40597-0x1003089479b0001, quorum=127.0.0.1:56616, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:46:26,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40891-0x1003089479b0000, quorum=127.0.0.1:56616, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:46:26,831 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T20:46:26,831 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-11T20:46:26,832 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T20:46:26,832 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T20:46:26,832 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '51ca66f7ee3c,40597,1731357961062' ***** 2024-11-11T20:46:26,832 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40891-0x1003089479b0000, quorum=127.0.0.1:56616, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T20:46:26,832 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-11T20:46:26,832 INFO [RS:0;51ca66f7ee3c:40597 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T20:46:26,832 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-11T20:46:26,832 INFO [RS:0;51ca66f7ee3c:40597 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-11T20:46:26,832 INFO [RS:0;51ca66f7ee3c:40597 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-11T20:46:26,832 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40597-0x1003089479b0001, quorum=127.0.0.1:56616, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T20:46:26,832 INFO [RS:0;51ca66f7ee3c:40597 {}] regionserver.HRegionServer(3091): Received CLOSE for dffd65cf76b43e3fb9e3516b7496bf3e 2024-11-11T20:46:26,833 INFO [RS:0;51ca66f7ee3c:40597 {}] regionserver.HRegionServer(959): stopping server 51ca66f7ee3c,40597,1731357961062 2024-11-11T20:46:26,833 INFO [RS:0;51ca66f7ee3c:40597 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T20:46:26,833 INFO [RS:0;51ca66f7ee3c:40597 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;51ca66f7ee3c:40597. 
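The shutdown is coordinated through ZooKeeper: deleting /hbase/running produces the NodeDeleted events above, each ZKWatcher re-registers a watch on the now-missing znode, and the region server then moves to STOPPING. A minimal sketch of that signalling pattern using the plain ZooKeeper client; the quorum address and timeout are placeholders, and HBase's own ZKWatcher adds reconnection and error handling not shown here.

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

/** Illustrative sketch: treat deletion of /hbase/running as a cluster-shutdown signal. */
public final class RunningNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch shutdownSignalled = new CountDownLatch(1);
    Watcher watcher = (WatchedEvent event) -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted
          && "/hbase/running".equals(event.getPath())) {
        shutdownSignalled.countDown();
      }
    };
    // Placeholder quorum address; the log above uses 127.0.0.1:56616.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, watcher);
    // exists() registers the default watcher even if the node is absent, mirroring the
    // "Set watcher on znode that does not yet exist, /hbase/running" lines above.
    zk.exists("/hbase/running", true);
    shutdownSignalled.await();
    zk.close();
  }
}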
2024-11-11T20:46:26,833 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing dffd65cf76b43e3fb9e3516b7496bf3e, disabling compactions & flushes 2024-11-11T20:46:26,833 DEBUG [RS:0;51ca66f7ee3c:40597 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T20:46:26,833 INFO [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731357961891.dffd65cf76b43e3fb9e3516b7496bf3e. 2024-11-11T20:46:26,833 DEBUG [RS:0;51ca66f7ee3c:40597 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T20:46:26,833 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731357961891.dffd65cf76b43e3fb9e3516b7496bf3e. 2024-11-11T20:46:26,833 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731357961891.dffd65cf76b43e3fb9e3516b7496bf3e. after waiting 0 ms 2024-11-11T20:46:26,833 INFO [RS:0;51ca66f7ee3c:40597 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-11T20:46:26,833 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731357961891.dffd65cf76b43e3fb9e3516b7496bf3e. 2024-11-11T20:46:26,833 INFO [RS:0;51ca66f7ee3c:40597 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-11T20:46:26,833 INFO [RS:0;51ca66f7ee3c:40597 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-11T20:46:26,833 INFO [RS:0;51ca66f7ee3c:40597 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740
2024-11-11T20:46:26,833 INFO [RS:0;51ca66f7ee3c:40597 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close
2024-11-11T20:46:26,833 DEBUG [RS:0;51ca66f7ee3c:40597 {}] regionserver.HRegionServer(1325): Online Regions={dffd65cf76b43e3fb9e3516b7496bf3e=TestLogRolling-testLogRollOnPipelineRestart,,1731357961891.dffd65cf76b43e3fb9e3516b7496bf3e., 1588230740=hbase:meta,,1.1588230740}
2024-11-11T20:46:26,833 DEBUG [RS:0;51ca66f7ee3c:40597 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, dffd65cf76b43e3fb9e3516b7496bf3e
2024-11-11T20:46:26,833 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-11-11T20:46:26,833 INFO [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-11-11T20:46:26,834 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-11-11T20:46:26,834 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-11-11T20:46:26,834 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-11-11T20:46:26,837 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/data/default/TestLogRolling-testLogRollOnPipelineRestart/dffd65cf76b43e3fb9e3516b7496bf3e/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1
2024-11-11T20:46:26,837 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1
2024-11-11T20:46:26,838 INFO [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731357961891.dffd65cf76b43e3fb9e3516b7496bf3e.
2024-11-11T20:46:26,838 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for dffd65cf76b43e3fb9e3516b7496bf3e: Waiting for close lock at 1731357986833Running coprocessor pre-close hooks at 1731357986833Disabling compacts and flushes for region at 1731357986833Disabling writes for close at 1731357986833Writing region close event to WAL at 1731357986834 (+1 ms)Running coprocessor post-close hooks at 1731357986838 (+4 ms)Closed at 1731357986838 2024-11-11T20:46:26,838 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T20:46:26,838 INFO [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T20:46:26,838 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731357961891.dffd65cf76b43e3fb9e3516b7496bf3e. 2024-11-11T20:46:26,838 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731357986833Running coprocessor pre-close hooks at 1731357986833Disabling compacts and flushes for region at 1731357986833Disabling writes for close at 1731357986834 (+1 ms)Writing region close event to WAL at 1731357986835 (+1 ms)Running coprocessor post-close hooks at 1731357986838 (+3 ms)Closed at 1731357986838 2024-11-11T20:46:26,838 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-11T20:46:26,870 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:26,877 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:27,034 INFO [RS:0;51ca66f7ee3c:40597 {}] regionserver.HRegionServer(976): stopping server 51ca66f7ee3c,40597,1731357961062; all regions closed. 
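The repeated "Failed invocation ... isFileClosed" WARNs above come from RecoverLeaseFSUtils polling the NameNode about an old WAL after the underlying DFSClient has already been shut down, hence the "Filesystem closed" cause. A minimal sketch of the recoverLease()/isFileClosed() loop that utility drives, assuming a live DistributedFileSystem handle; the namenode address and WAL path are placeholders, and the real utility adds timeouts, backoff, and the reflective isFileClosed probe that fails here.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

/** Illustrative sketch of HDFS lease recovery on an abandoned WAL file. */
public final class LeaseRecoverySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path wal = new Path("/user/jenkins/WALs/example-wal"); // placeholder path
    try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      // recoverLease() returns true once the NameNode has released the old writer's lease.
      boolean recovered = dfs.recoverLease(wal);
      while (!recovered && !dfs.isFileClosed(wal)) {
        Thread.sleep(1000); // crude pause between probes; the real code paces and times out
        recovered = dfs.recoverLease(wal);
      }
    }
  }
}

This is what the "Recovered lease, attempt=N on file=..." INFO lines elsewhere in the log report: the loop ends once the NameNode confirms the file is closed.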
2024-11-11T20:46:27,034 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:27,034 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:27,034 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:27,034 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:27,035 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:27,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741842_1025 (size=825) 2024-11-11T20:46:27,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35911 is added to blk_1073741842_1025 (size=825) 2024-11-11T20:46:27,305 INFO [regionserver/51ca66f7ee3c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-11T20:46:27,306 INFO [regionserver/51ca66f7ee3c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-11T20:46:27,306 INFO [regionserver/51ca66f7ee3c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T20:46:27,669 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1015: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-11T20:46:27,871 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:27,877 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:28,872 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:28,878 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:29,873 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:29,879 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T20:46:30,731 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.meta.1731357961813.meta after 4002ms 2024-11-11T20:46:30,732 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/WALs/51ca66f7ee3c,40597,1731357961062/51ca66f7ee3c%2C40597%2C1731357961062.meta.1731357961813.meta to hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/oldWALs/51ca66f7ee3c%2C40597%2C1731357961062.meta.1731357961813.meta 2024-11-11T20:46:30,738 DEBUG [RS:0;51ca66f7ee3c:40597 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/oldWALs 2024-11-11T20:46:30,738 INFO [RS:0;51ca66f7ee3c:40597 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 51ca66f7ee3c%2C40597%2C1731357961062.meta:.meta(num 1731357986723) 2024-11-11T20:46:30,739 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:30,739 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:30,740 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:30,740 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:30,740 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:30,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35911 is added to blk_1073741840_1023 (size=1162) 2024-11-11T20:46:30,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741840_1023 (size=1162) 2024-11-11T20:46:30,750 DEBUG [RS:0;51ca66f7ee3c:40597 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/oldWALs 2024-11-11T20:46:30,750 INFO [RS:0;51ca66f7ee3c:40597 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 51ca66f7ee3c%2C40597%2C1731357961062:(num 1731357986674) 2024-11-11T20:46:30,750 DEBUG [RS:0;51ca66f7ee3c:40597 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T20:46:30,750 INFO [RS:0;51ca66f7ee3c:40597 {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T20:46:30,751 INFO [RS:0;51ca66f7ee3c:40597 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T20:46:30,751 INFO [RS:0;51ca66f7ee3c:40597 {}] hbase.ChoreService(370): Chore service for: regionserver/51ca66f7ee3c:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-11T20:46:30,751 INFO [RS:0;51ca66f7ee3c:40597 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T20:46:30,751 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-11T20:46:30,751 INFO [RS:0;51ca66f7ee3c:40597 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40597 2024-11-11T20:46:30,752 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40597-0x1003089479b0001, quorum=127.0.0.1:56616, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/51ca66f7ee3c,40597,1731357961062 2024-11-11T20:46:30,753 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40891-0x1003089479b0000, quorum=127.0.0.1:56616, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T20:46:30,753 INFO [RS:0;51ca66f7ee3c:40597 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T20:46:30,753 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [51ca66f7ee3c,40597,1731357961062] 2024-11-11T20:46:30,754 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/51ca66f7ee3c,40597,1731357961062 already deleted, retry=false 2024-11-11T20:46:30,754 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 51ca66f7ee3c,40597,1731357961062 expired; onlineServers=0 2024-11-11T20:46:30,754 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '51ca66f7ee3c,40891,1731357961024' ***** 2024-11-11T20:46:30,754 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-11T20:46:30,754 INFO [M:0;51ca66f7ee3c:40891 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T20:46:30,755 INFO [M:0;51ca66f7ee3c:40891 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T20:46:30,755 DEBUG [M:0;51ca66f7ee3c:40891 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-11T20:46:30,755 DEBUG [M:0;51ca66f7ee3c:40891 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-11T20:46:30,755 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-11T20:46:30,755 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster-HFileCleaner.small.0-1731357961205 {}] cleaner.HFileCleaner(306): Exit Thread[master/51ca66f7ee3c:0:becomeActiveMaster-HFileCleaner.small.0-1731357961205,5,FailOnTimeoutGroup] 2024-11-11T20:46:30,755 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster-HFileCleaner.large.0-1731357961205 {}] cleaner.HFileCleaner(306): Exit Thread[master/51ca66f7ee3c:0:becomeActiveMaster-HFileCleaner.large.0-1731357961205,5,FailOnTimeoutGroup] 2024-11-11T20:46:30,755 INFO [M:0;51ca66f7ee3c:40891 {}] hbase.ChoreService(370): Chore service for: master/51ca66f7ee3c:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-11T20:46:30,755 INFO [M:0;51ca66f7ee3c:40891 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T20:46:30,755 DEBUG [M:0;51ca66f7ee3c:40891 {}] master.HMaster(1795): Stopping service threads 2024-11-11T20:46:30,755 INFO [M:0;51ca66f7ee3c:40891 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-11T20:46:30,755 INFO [M:0;51ca66f7ee3c:40891 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T20:46:30,756 INFO [M:0;51ca66f7ee3c:40891 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-11T20:46:30,756 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-11T20:46:30,756 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40891-0x1003089479b0000, quorum=127.0.0.1:56616, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-11T20:46:30,756 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40891-0x1003089479b0000, quorum=127.0.0.1:56616, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:46:30,756 DEBUG [M:0;51ca66f7ee3c:40891 {}] zookeeper.ZKUtil(347): master:40891-0x1003089479b0000, quorum=127.0.0.1:56616, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-11T20:46:30,756 WARN [M:0;51ca66f7ee3c:40891 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-11T20:46:30,757 INFO [M:0;51ca66f7ee3c:40891 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/.lastflushedseqids 2024-11-11T20:46:30,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741846_1030 (size=130) 2024-11-11T20:46:30,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35911 is added to blk_1073741846_1030 (size=130) 2024-11-11T20:46:30,763 INFO [M:0;51ca66f7ee3c:40891 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-11T20:46:30,763 INFO [M:0;51ca66f7ee3c:40891 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-11T20:46:30,763 DEBUG [M:0;51ca66f7ee3c:40891 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T20:46:30,763 INFO [M:0;51ca66f7ee3c:40891 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T20:46:30,763 DEBUG [M:0;51ca66f7ee3c:40891 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T20:46:30,763 DEBUG [M:0;51ca66f7ee3c:40891 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T20:46:30,763 DEBUG [M:0;51ca66f7ee3c:40891 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T20:46:30,763 INFO [M:0;51ca66f7ee3c:40891 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.16 KB heapSize=29.13 KB 2024-11-11T20:46:30,764 ERROR [FSHLog-0-hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/MasterData-prefix:51ca66f7ee3c,40891,1731357961024 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36619,DS-d3abf4b2-410d-491f-b3e6-c3a5acc269cb,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:46:30,764 WARN [FSHLog-0-hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/MasterData-prefix:51ca66f7ee3c,40891,1731357961024 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36619,DS-d3abf4b2-410d-491f-b3e6-c3a5acc269cb,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T20:46:30,764 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 51ca66f7ee3c%2C40891%2C1731357961024:(num 1731357961140) roll requested 2024-11-11T20:46:30,764 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 51ca66f7ee3c%2C40891%2C1731357961024.1731357990764 2024-11-11T20:46:30,769 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:30,769 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:30,769 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:30,769 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:30,770 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:30,770 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/MasterData/WALs/51ca66f7ee3c,40891,1731357961024/51ca66f7ee3c%2C40891%2C1731357961024.1731357961140 with entries=53, filesize=26.61 KB; new WAL /user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/MasterData/WALs/51ca66f7ee3c,40891,1731357961024/51ca66f7ee3c%2C40891%2C1731357961024.1731357990764 2024-11-11T20:46:30,770 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36619,DS-d3abf4b2-410d-491f-b3e6-c3a5acc269cb,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:46:30,770 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36619,DS-d3abf4b2-410d-491f-b3e6-c3a5acc269cb,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T20:46:30,770 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/MasterData/WALs/51ca66f7ee3c,40891,1731357961024/51ca66f7ee3c%2C40891%2C1731357961024.1731357961140 2024-11-11T20:46:30,771 WARN [IPC Server handler 0 on default port 36857 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/MasterData/WALs/51ca66f7ee3c,40891,1731357961024/51ca66f7ee3c%2C40891%2C1731357961024.1731357961140 has not been closed. 
Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1013 2024-11-11T20:46:30,771 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/MasterData/WALs/51ca66f7ee3c,40891,1731357961024/51ca66f7ee3c%2C40891%2C1731357961024.1731357961140 after 1ms 2024-11-11T20:46:30,771 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36269:36269),(127.0.0.1/127.0.0.1:44773:44773)] 2024-11-11T20:46:30,771 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/MasterData/WALs/51ca66f7ee3c,40891,1731357961024/51ca66f7ee3c%2C40891%2C1731357961024.1731357961140 is not closed yet, will try archiving it next time 2024-11-11T20:46:30,791 DEBUG [M:0;51ca66f7ee3c:40891 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/bc055ee84ac346bc900d5be4378c0781 is 82, key is hbase:meta,,1/info:regioninfo/1731357961838/Put/seqid=0 2024-11-11T20:46:30,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741848_1033 (size=5672) 2024-11-11T20:46:30,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35911 is added to blk_1073741848_1033 (size=5672) 2024-11-11T20:46:30,796 INFO [M:0;51ca66f7ee3c:40891 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/bc055ee84ac346bc900d5be4378c0781 2024-11-11T20:46:30,815 DEBUG [M:0;51ca66f7ee3c:40891 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/720437800d20416ba4f4c3b78d3c21e9 is 777, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731357962282/Put/seqid=0 2024-11-11T20:46:30,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35911 is added to blk_1073741849_1034 (size=6117) 2024-11-11T20:46:30,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741849_1034 (size=6117) 2024-11-11T20:46:30,820 INFO [M:0;51ca66f7ee3c:40891 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.56 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/720437800d20416ba4f4c3b78d3c21e9 2024-11-11T20:46:30,837 DEBUG [M:0;51ca66f7ee3c:40891 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4f6c05adabc0448d82a16330c2696d81 is 69, key is 51ca66f7ee3c,40597,1731357961062/rs:state/1731357961292/Put/seqid=0 2024-11-11T20:46:30,841 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35911 is added to blk_1073741850_1035 (size=5156) 2024-11-11T20:46:30,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741850_1035 (size=5156) 2024-11-11T20:46:30,842 INFO [M:0;51ca66f7ee3c:40891 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4f6c05adabc0448d82a16330c2696d81 2024-11-11T20:46:30,854 INFO [RS:0;51ca66f7ee3c:40597 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T20:46:30,854 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40597-0x1003089479b0001, quorum=127.0.0.1:56616, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T20:46:30,854 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40597-0x1003089479b0001, quorum=127.0.0.1:56616, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T20:46:30,854 INFO [RS:0;51ca66f7ee3c:40597 {}] regionserver.HRegionServer(1031): Exiting; stopping=51ca66f7ee3c,40597,1731357961062; zookeeper connection closed. 2024-11-11T20:46:30,854 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@775c43de {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@775c43de 2024-11-11T20:46:30,854 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-11T20:46:30,860 DEBUG [M:0;51ca66f7ee3c:40891 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/452d6fa5f6f144c0a8b9a2a9485472ee is 52, key is load_balancer_on/state:d/1731357961885/Put/seqid=0 2024-11-11T20:46:30,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741851_1036 (size=5056) 2024-11-11T20:46:30,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35911 is added to blk_1073741851_1036 (size=5056) 2024-11-11T20:46:30,865 INFO [M:0;51ca66f7ee3c:40891 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/452d6fa5f6f144c0a8b9a2a9485472ee 2024-11-11T20:46:30,871 DEBUG [M:0;51ca66f7ee3c:40891 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/bc055ee84ac346bc900d5be4378c0781 as hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/bc055ee84ac346bc900d5be4378c0781 2024-11-11T20:46:30,873 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:30,877 INFO [M:0;51ca66f7ee3c:40891 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/bc055ee84ac346bc900d5be4378c0781, entries=8, sequenceid=56, filesize=5.5 K 2024-11-11T20:46:30,878 DEBUG [M:0;51ca66f7ee3c:40891 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/720437800d20416ba4f4c3b78d3c21e9 as hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/720437800d20416ba4f4c3b78d3c21e9 2024-11-11T20:46:30,880 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:30,883 INFO [M:0;51ca66f7ee3c:40891 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/720437800d20416ba4f4c3b78d3c21e9, entries=6, sequenceid=56, filesize=6.0 K 2024-11-11T20:46:30,884 DEBUG [M:0;51ca66f7ee3c:40891 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4f6c05adabc0448d82a16330c2696d81 as hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/4f6c05adabc0448d82a16330c2696d81 2024-11-11T20:46:30,890 INFO [M:0;51ca66f7ee3c:40891 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/4f6c05adabc0448d82a16330c2696d81, entries=1, sequenceid=56, filesize=5.0 K 2024-11-11T20:46:30,891 DEBUG [M:0;51ca66f7ee3c:40891 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/452d6fa5f6f144c0a8b9a2a9485472ee as hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/452d6fa5f6f144c0a8b9a2a9485472ee 2024-11-11T20:46:30,896 INFO [M:0;51ca66f7ee3c:40891 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/452d6fa5f6f144c0a8b9a2a9485472ee, entries=1, sequenceid=56, filesize=4.9 K 2024-11-11T20:46:30,897 INFO [M:0;51ca66f7ee3c:40891 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.16 KB/23714, heapSize ~29.07 KB/29768, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 134ms, sequenceid=56, compaction requested=false 2024-11-11T20:46:30,898 INFO [M:0;51ca66f7ee3c:40891 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T20:46:30,899 DEBUG [M:0;51ca66f7ee3c:40891 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731357990763Disabling compacts and flushes for region at 1731357990763Disabling writes for close at 1731357990763Obtaining lock to block concurrent updates at 1731357990763Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731357990763Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23714, getHeapSize=29768, getOffHeapSize=0, getCellsCount=67 at 1731357990764 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731357990772 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731357990772Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731357990791 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731357990791Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731357990800 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731357990814 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731357990814Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731357990824 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731357990836 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731357990836Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731357990846 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731357990859 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731357990859Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4b375760: reopening flushed file at 1731357990870 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7d707727: reopening flushed file at 1731357990877 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5aa1d596: reopening flushed file at 1731357990883 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@669cdf9e: reopening flushed file at 1731357990890 (+7 ms)Finished flush of dataSize ~23.16 KB/23714, heapSize ~29.07 KB/29768, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 134ms, sequenceid=56, compaction requested=false at 1731357990897 (+7 ms)Writing region close event to WAL at 1731357990898 (+1 ms)Closed at 1731357990898 2024-11-11T20:46:30,899 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:30,899 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:30,899 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:30,899 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 
2024-11-11T20:46:30,899 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:46:30,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35911 is added to blk_1073741847_1031 (size=757) 2024-11-11T20:46:30,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741847_1031 (size=757) 2024-11-11T20:46:31,001 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-11T20:46:31,839 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:31,840 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:31,861 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:31,862 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:31,862 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:31,862 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:31,862 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:31,862 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:31,865 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:31,865 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:31,865 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:31,867 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:31,871 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:31,871 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:31,874 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:31,880 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:32,375 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-11T20:46:32,377 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:32,378 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:32,379 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:32,380 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:32,401 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:32,401 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:32,401 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:32,402 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:32,402 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:32,402 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:32,405 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:32,406 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:32,406 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:32,408 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:32,874 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:32,880 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:33,669 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1013: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-11T20:46:33,875 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:33,881 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:34,773 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/MasterData/WALs/51ca66f7ee3c,40891,1731357961024/51ca66f7ee3c%2C40891%2C1731357961024.1731357961140 after 4002ms 2024-11-11T20:46:34,774 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/MasterData/WALs/51ca66f7ee3c,40891,1731357961024/51ca66f7ee3c%2C40891%2C1731357961024.1731357961140 to hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/MasterData/oldWALs/51ca66f7ee3c%2C40891%2C1731357961024.1731357961140 2024-11-11T20:46:34,782 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/MasterData/oldWALs/51ca66f7ee3c%2C40891%2C1731357961024.1731357961140 to hdfs://localhost:36857/user/jenkins/test-data/6a1212d5-295a-d8e6-00ae-a75aec646eb5/oldWALs/51ca66f7ee3c%2C40891%2C1731357961024.1731357961140$masterlocalwal$ 2024-11-11T20:46:34,783 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-11T20:46:34,783 INFO [M:0;51ca66f7ee3c:40891 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-11T20:46:34,783 INFO [M:0;51ca66f7ee3c:40891 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40891 2024-11-11T20:46:34,783 INFO [M:0;51ca66f7ee3c:40891 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T20:46:34,877 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:34,882 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:34,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40891-0x1003089479b0000, quorum=127.0.0.1:56616, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T20:46:34,885 INFO [M:0;51ca66f7ee3c:40891 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T20:46:34,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40891-0x1003089479b0000, quorum=127.0.0.1:56616, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T20:46:34,891 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1dad3af2{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T20:46:34,892 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4307cd3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T20:46:34,892 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T20:46:34,892 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7939cb3e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T20:46:34,893 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@29d5ccf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/hadoop.log.dir/,STOPPED} 2024-11-11T20:46:34,895 ERROR [Command processor {}] 
datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-11T20:46:34,895 WARN [BP-391654463-172.17.0.2-1731357960435 heartbeating to localhost/127.0.0.1:36857 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T20:46:34,895 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T20:46:34,895 WARN [BP-391654463-172.17.0.2-1731357960435 heartbeating to localhost/127.0.0.1:36857 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-391654463-172.17.0.2-1731357960435 (Datanode Uuid 09dbf34a-6fb5-44ad-8b0f-667c3c8b31ab) service to localhost/127.0.0.1:36857 2024-11-11T20:46:34,896 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/cluster_adf983fe-fa26-345e-6630-2ba2317a1d11/data/data3/current/BP-391654463-172.17.0.2-1731357960435 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T20:46:34,896 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/cluster_adf983fe-fa26-345e-6630-2ba2317a1d11/data/data4/current/BP-391654463-172.17.0.2-1731357960435 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T20:46:34,897 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T20:46:34,900 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3a397072{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T20:46:34,900 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5bc55163{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T20:46:34,900 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T20:46:34,901 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3d61cf28{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T20:46:34,901 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3e3b369c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/hadoop.log.dir/,STOPPED} 2024-11-11T20:46:34,902 WARN [BP-391654463-172.17.0.2-1731357960435 heartbeating to localhost/127.0.0.1:36857 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T20:46:34,902 WARN [BP-391654463-172.17.0.2-1731357960435 heartbeating to localhost/127.0.0.1:36857 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-391654463-172.17.0.2-1731357960435 (Datanode Uuid 253fda30-9b38-4c72-8812-1987252b2841) service to localhost/127.0.0.1:36857 2024-11-11T20:46:34,902 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor 
encountered interrupt and exit. 2024-11-11T20:46:34,902 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T20:46:34,903 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/cluster_adf983fe-fa26-345e-6630-2ba2317a1d11/data/data1/current/BP-391654463-172.17.0.2-1731357960435 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T20:46:34,903 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/cluster_adf983fe-fa26-345e-6630-2ba2317a1d11/data/data2/current/BP-391654463-172.17.0.2-1731357960435 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T20:46:34,903 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T20:46:34,909 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1a59d25d{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T20:46:34,910 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5e526681{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T20:46:34,910 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T20:46:34,910 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4c5497db{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T20:46:34,910 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@464ea64c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/hadoop.log.dir/,STOPPED} 2024-11-11T20:46:34,917 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-11T20:46:34,937 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-11T20:46:34,944 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=180 (was 153) Potentially hanging thread: LeaseRenewer:jenkins@localhost:36857 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:36857 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36857 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36857 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36857 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36857 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36857 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36857 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=455 (was 450) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=127 (was 115) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=3975 (was 4202) 2024-11-11T20:46:34,950 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=180, OpenFileDescriptor=455, MaxFileDescriptor=1048576, SystemLoadAverage=127, ProcessCount=11, AvailableMemoryMB=3975 2024-11-11T20:46:34,950 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-11T20:46:34,951 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/hadoop.log.dir so I do NOT create it in target/test-data/cd806552-9445-b77f-3c8d-84b37576ad76 2024-11-11T20:46:34,951 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf12611e-2316-150e-8454-e237ac8003dc/hadoop.tmp.dir so I do NOT create it in target/test-data/cd806552-9445-b77f-3c8d-84b37576ad76 2024-11-11T20:46:34,951 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd806552-9445-b77f-3c8d-84b37576ad76/cluster_83adb649-b2fb-daa2-0c63-27c767e38a69, deleteOnExit=true 2024-11-11T20:46:34,951 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-11T20:46:34,951 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd806552-9445-b77f-3c8d-84b37576ad76/test.cache.data in system properties and HBase conf 2024-11-11T20:46:34,951 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd806552-9445-b77f-3c8d-84b37576ad76/hadoop.tmp.dir in system properties and HBase conf 2024-11-11T20:46:34,951 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd806552-9445-b77f-3c8d-84b37576ad76/hadoop.log.dir in system properties and HBase conf 2024-11-11T20:46:34,951 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd806552-9445-b77f-3c8d-84b37576ad76/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-11T20:46:34,951 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd806552-9445-b77f-3c8d-84b37576ad76/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-11T20:46:34,951 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-11T20:46:34,951 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-11T20:46:34,952 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd806552-9445-b77f-3c8d-84b37576ad76/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-11T20:46:34,952 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd806552-9445-b77f-3c8d-84b37576ad76/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-11T20:46:34,952 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd806552-9445-b77f-3c8d-84b37576ad76/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-11T20:46:34,952 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd806552-9445-b77f-3c8d-84b37576ad76/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T20:46:34,952 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd806552-9445-b77f-3c8d-84b37576ad76/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-11T20:46:34,952 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd806552-9445-b77f-3c8d-84b37576ad76/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-11T20:46:34,952 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd806552-9445-b77f-3c8d-84b37576ad76/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T20:46:34,952 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd806552-9445-b77f-3c8d-84b37576ad76/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T20:46:34,952 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd806552-9445-b77f-3c8d-84b37576ad76/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-11T20:46:34,952 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd806552-9445-b77f-3c8d-84b37576ad76/nfs.dump.dir in system properties and HBase conf 2024-11-11T20:46:34,952 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd806552-9445-b77f-3c8d-84b37576ad76/java.io.tmpdir in system properties and HBase conf 2024-11-11T20:46:34,952 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd806552-9445-b77f-3c8d-84b37576ad76/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T20:46:34,952 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd806552-9445-b77f-3c8d-84b37576ad76/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-11T20:46:34,952 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd806552-9445-b77f-3c8d-84b37576ad76/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-11T20:46:34,966 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-11T20:46:35,013 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T20:46:35,017 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T20:46:35,018 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T20:46:35,018 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T20:46:35,018 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T20:46:35,018 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T20:46:35,019 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@642a12de{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd806552-9445-b77f-3c8d-84b37576ad76/hadoop.log.dir/,AVAILABLE} 2024-11-11T20:46:35,019 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a5372a5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T20:46:35,111 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3cb009a1{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd806552-9445-b77f-3c8d-84b37576ad76/java.io.tmpdir/jetty-localhost-32941-hadoop-hdfs-3_4_1-tests_jar-_-any-14926835325648442394/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T20:46:35,112 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6349ff4f{HTTP/1.1, (http/1.1)}{localhost:32941} 2024-11-11T20:46:35,112 INFO [Time-limited test {}] server.Server(415): Started @184336ms 2024-11-11T20:46:35,123 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-11T20:46:35,162 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T20:46:35,167 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T20:46:35,168 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T20:46:35,168 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T20:46:35,168 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T20:46:35,169 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@30c9629f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd806552-9445-b77f-3c8d-84b37576ad76/hadoop.log.dir/,AVAILABLE} 2024-11-11T20:46:35,169 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21edc7fd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T20:46:35,262 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@20ad0bae{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd806552-9445-b77f-3c8d-84b37576ad76/java.io.tmpdir/jetty-localhost-46423-hadoop-hdfs-3_4_1-tests_jar-_-any-9695641630793069899/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T20:46:35,263 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@df90952{HTTP/1.1, (http/1.1)}{localhost:46423} 2024-11-11T20:46:35,263 INFO [Time-limited test {}] server.Server(415): Started @184487ms 2024-11-11T20:46:35,264 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T20:46:35,291 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T20:46:35,294 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T20:46:35,295 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T20:46:35,295 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T20:46:35,295 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-11T20:46:35,296 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@222377c1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd806552-9445-b77f-3c8d-84b37576ad76/hadoop.log.dir/,AVAILABLE} 2024-11-11T20:46:35,296 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@e21aaf2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T20:46:35,321 WARN [Thread-1637 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd806552-9445-b77f-3c8d-84b37576ad76/cluster_83adb649-b2fb-daa2-0c63-27c767e38a69/data/data1/current/BP-1837961949-172.17.0.2-1731357994975/current, will proceed with Du for space computation calculation, 2024-11-11T20:46:35,321 WARN [Thread-1638 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd806552-9445-b77f-3c8d-84b37576ad76/cluster_83adb649-b2fb-daa2-0c63-27c767e38a69/data/data2/current/BP-1837961949-172.17.0.2-1731357994975/current, will proceed with Du for space computation calculation, 2024-11-11T20:46:35,340 WARN [Thread-1616 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T20:46:35,342 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x95243281621c978e with lease ID 0x7476776183d1257: Processing first storage report for DS-be8caaab-e86a-4169-9ef9-902fbd3eed9b from datanode DatanodeRegistration(127.0.0.1:41687, datanodeUuid=3e5b2e5d-d795-4486-9204-0538e1323898, infoPort=40017, infoSecurePort=0, ipcPort=34429, storageInfo=lv=-57;cid=testClusterID;nsid=128129550;c=1731357994975) 2024-11-11T20:46:35,342 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x95243281621c978e with lease ID 0x7476776183d1257: from storage DS-be8caaab-e86a-4169-9ef9-902fbd3eed9b node DatanodeRegistration(127.0.0.1:41687, datanodeUuid=3e5b2e5d-d795-4486-9204-0538e1323898, infoPort=40017, infoSecurePort=0, ipcPort=34429, storageInfo=lv=-57;cid=testClusterID;nsid=128129550;c=1731357994975), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T20:46:35,342 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x95243281621c978e with lease ID 0x7476776183d1257: Processing first storage report for DS-b255e07e-ef60-4c5c-a5dd-e137055fc3c1 from datanode DatanodeRegistration(127.0.0.1:41687, datanodeUuid=3e5b2e5d-d795-4486-9204-0538e1323898, infoPort=40017, infoSecurePort=0, ipcPort=34429, storageInfo=lv=-57;cid=testClusterID;nsid=128129550;c=1731357994975) 2024-11-11T20:46:35,343 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x95243281621c978e with lease ID 0x7476776183d1257: from storage DS-b255e07e-ef60-4c5c-a5dd-e137055fc3c1 node DatanodeRegistration(127.0.0.1:41687, datanodeUuid=3e5b2e5d-d795-4486-9204-0538e1323898, infoPort=40017, infoSecurePort=0, ipcPort=34429, storageInfo=lv=-57;cid=testClusterID;nsid=128129550;c=1731357994975), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T20:46:35,395 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5ca63b58{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd806552-9445-b77f-3c8d-84b37576ad76/java.io.tmpdir/jetty-localhost-34039-hadoop-hdfs-3_4_1-tests_jar-_-any-14047784762702727088/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T20:46:35,395 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2719a663{HTTP/1.1, (http/1.1)}{localhost:34039} 2024-11-11T20:46:35,395 INFO [Time-limited test {}] server.Server(415): Started @184620ms 2024-11-11T20:46:35,396 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-11T20:46:35,450 WARN [Thread-1663 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd806552-9445-b77f-3c8d-84b37576ad76/cluster_83adb649-b2fb-daa2-0c63-27c767e38a69/data/data3/current/BP-1837961949-172.17.0.2-1731357994975/current, will proceed with Du for space computation calculation, 2024-11-11T20:46:35,450 WARN [Thread-1664 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd806552-9445-b77f-3c8d-84b37576ad76/cluster_83adb649-b2fb-daa2-0c63-27c767e38a69/data/data4/current/BP-1837961949-172.17.0.2-1731357994975/current, will proceed with Du for space computation calculation, 2024-11-11T20:46:35,467 WARN [Thread-1652 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-11T20:46:35,469 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8b108509dd13ad06 with lease ID 0x7476776183d1258: Processing first storage report for DS-6b833f8a-7575-4764-ae41-6cfbb981477f from datanode DatanodeRegistration(127.0.0.1:43291, datanodeUuid=57a8f8c0-e0d1-4ab0-8cf2-b22a9c7c64f5, infoPort=37095, infoSecurePort=0, ipcPort=44785, storageInfo=lv=-57;cid=testClusterID;nsid=128129550;c=1731357994975) 2024-11-11T20:46:35,469 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8b108509dd13ad06 with lease ID 0x7476776183d1258: from storage DS-6b833f8a-7575-4764-ae41-6cfbb981477f node DatanodeRegistration(127.0.0.1:43291, datanodeUuid=57a8f8c0-e0d1-4ab0-8cf2-b22a9c7c64f5, infoPort=37095, infoSecurePort=0, ipcPort=44785, storageInfo=lv=-57;cid=testClusterID;nsid=128129550;c=1731357994975), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T20:46:35,470 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8b108509dd13ad06 with lease ID 0x7476776183d1258: Processing first storage report for DS-ade6d2bd-f713-44f7-b261-45d1a5f75c9a from datanode DatanodeRegistration(127.0.0.1:43291, datanodeUuid=57a8f8c0-e0d1-4ab0-8cf2-b22a9c7c64f5, infoPort=37095, infoSecurePort=0, ipcPort=44785, storageInfo=lv=-57;cid=testClusterID;nsid=128129550;c=1731357994975) 2024-11-11T20:46:35,470 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8b108509dd13ad06 with lease ID 0x7476776183d1258: from storage DS-ade6d2bd-f713-44f7-b261-45d1a5f75c9a node DatanodeRegistration(127.0.0.1:43291, datanodeUuid=57a8f8c0-e0d1-4ab0-8cf2-b22a9c7c64f5, infoPort=37095, infoSecurePort=0, ipcPort=44785, storageInfo=lv=-57;cid=testClusterID;nsid=128129550;c=1731357994975), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T20:46:35,519 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd806552-9445-b77f-3c8d-84b37576ad76 2024-11-11T20:46:35,522 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd806552-9445-b77f-3c8d-84b37576ad76/cluster_83adb649-b2fb-daa2-0c63-27c767e38a69/zookeeper_0, clientPort=58490, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd806552-9445-b77f-3c8d-84b37576ad76/cluster_83adb649-b2fb-daa2-0c63-27c767e38a69/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd806552-9445-b77f-3c8d-84b37576ad76/cluster_83adb649-b2fb-daa2-0c63-27c767e38a69/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-11T20:46:35,523 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58490 2024-11-11T20:46:35,523 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:46:35,525 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:46:35,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41687 is added to blk_1073741825_1001 (size=7) 2024-11-11T20:46:35,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43291 is added to blk_1073741825_1001 (size=7) 2024-11-11T20:46:35,534 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d with version=8 2024-11-11T20:46:35,535 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/hbase-staging 2024-11-11T20:46:35,536 INFO [Time-limited test {}] client.ConnectionUtils(128): master/51ca66f7ee3c:0 server-side Connection retries=45 2024-11-11T20:46:35,536 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T20:46:35,536 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T20:46:35,536 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T20:46:35,536 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T20:46:35,536 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T20:46:35,536 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-11T20:46:35,536 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T20:46:35,537 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42863 2024-11-11T20:46:35,538 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42863 connecting to ZooKeeper ensemble=127.0.0.1:58490 2024-11-11T20:46:35,542 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:428630x0, quorum=127.0.0.1:58490, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T20:46:35,543 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42863-0x1003089ce700000 connected 2024-11-11T20:46:35,558 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:46:35,559 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:46:35,561 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42863-0x1003089ce700000, quorum=127.0.0.1:58490, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T20:46:35,561 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d, hbase.cluster.distributed=false 2024-11-11T20:46:35,563 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42863-0x1003089ce700000, quorum=127.0.0.1:58490, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T20:46:35,563 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42863 2024-11-11T20:46:35,565 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42863 2024-11-11T20:46:35,566 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42863 2024-11-11T20:46:35,566 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42863 2024-11-11T20:46:35,566 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42863 2024-11-11T20:46:35,580 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/51ca66f7ee3c:0 server-side Connection retries=45 2024-11-11T20:46:35,580 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T20:46:35,580 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T20:46:35,580 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T20:46:35,580 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T20:46:35,580 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T20:46:35,581 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-11T20:46:35,581 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T20:46:35,581 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34653 2024-11-11T20:46:35,583 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34653 connecting to ZooKeeper ensemble=127.0.0.1:58490 2024-11-11T20:46:35,583 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:46:35,584 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:46:35,588 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:346530x0, quorum=127.0.0.1:58490, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T20:46:35,588 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:346530x0, quorum=127.0.0.1:58490, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T20:46:35,588 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34653-0x1003089ce700001 connected 2024-11-11T20:46:35,588 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-11T20:46:35,589 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-11T20:46:35,590 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34653-0x1003089ce700001, quorum=127.0.0.1:58490, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-11T20:46:35,590 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34653-0x1003089ce700001, quorum=127.0.0.1:58490, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T20:46:35,593 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34653 2024-11-11T20:46:35,593 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34653 2024-11-11T20:46:35,596 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34653 2024-11-11T20:46:35,597 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34653 2024-11-11T20:46:35,597 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34653 2024-11-11T20:46:35,608 
DEBUG [M:0;51ca66f7ee3c:42863 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;51ca66f7ee3c:42863 2024-11-11T20:46:35,608 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/51ca66f7ee3c,42863,1731357995536 2024-11-11T20:46:35,609 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42863-0x1003089ce700000, quorum=127.0.0.1:58490, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T20:46:35,609 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34653-0x1003089ce700001, quorum=127.0.0.1:58490, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T20:46:35,610 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42863-0x1003089ce700000, quorum=127.0.0.1:58490, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/51ca66f7ee3c,42863,1731357995536 2024-11-11T20:46:35,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34653-0x1003089ce700001, quorum=127.0.0.1:58490, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-11T20:46:35,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42863-0x1003089ce700000, quorum=127.0.0.1:58490, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:46:35,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34653-0x1003089ce700001, quorum=127.0.0.1:58490, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:46:35,611 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42863-0x1003089ce700000, quorum=127.0.0.1:58490, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-11T20:46:35,611 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/51ca66f7ee3c,42863,1731357995536 from backup master directory 2024-11-11T20:46:35,612 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34653-0x1003089ce700001, quorum=127.0.0.1:58490, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T20:46:35,612 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42863-0x1003089ce700000, quorum=127.0.0.1:58490, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/51ca66f7ee3c,42863,1731357995536 2024-11-11T20:46:35,612 WARN [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
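The records above show the master registering an ephemeral znode under /hbase/backup-masters and then promoting itself to active master while the region server keeps a watch on /hbase/master. Below is a minimal sketch of that ephemeral-znode registration/watch pattern, written against the plain Apache ZooKeeper client API rather than HBase's ZKWatcher/ActiveMasterManager classes; the quorum address and znode paths are illustrative assumptions, not values from this log.

    import java.nio.charset.StandardCharsets;
    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class EphemeralRegistrationSketch {
      public static void main(String[] args) throws Exception {
        // Quorum address and paths below are illustrative only.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000,
            event -> System.out.println("ZK event: " + event));

        // Make sure the persistent parent exists before adding children.
        if (zk.exists("/demo", false) == null) {
          zk.create("/demo", new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
        }

        // An ephemeral znode vanishes automatically when its session dies,
        // which is what lets a standby notice a crash and take over; the
        // HBASE_ZNODE_FILE warning above is about clearing such nodes faster.
        try {
          zk.create("/demo/active-master", "host,42863".getBytes(StandardCharsets.UTF_8),
              ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
          System.out.println("registered as active");
        } catch (KeeperException.NodeExistsException e) {
          // Someone else is active: set a watch and wait for NodeDeleted.
          zk.exists("/demo/active-master", true);
          System.out.println("standing by");
        }
        zk.close();
      }
    }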
2024-11-11T20:46:35,612 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42863-0x1003089ce700000, quorum=127.0.0.1:58490, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T20:46:35,612 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=51ca66f7ee3c,42863,1731357995536 2024-11-11T20:46:35,616 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/hbase.id] with ID: 32f19e65-b754-4d27-aa7c-bcc0d614ada8 2024-11-11T20:46:35,616 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/.tmp/hbase.id 2024-11-11T20:46:35,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41687 is added to blk_1073741826_1002 (size=42) 2024-11-11T20:46:35,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43291 is added to blk_1073741826_1002 (size=42) 2024-11-11T20:46:35,623 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/.tmp/hbase.id]:[hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/hbase.id] 2024-11-11T20:46:35,635 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:46:35,635 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-11T20:46:35,636 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
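FSUtils above writes the cluster ID to a .tmp location first and only then moves it to hbase.id, so a reader never observes a half-written file. The following is a hedged sketch of that write-then-rename pattern using the stock Hadoop FileSystem API; the paths are made up and this is not the FSUtils implementation itself.

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class WriteThenRenameSketch {
      public static void main(String[] args) throws IOException {
        // fs.defaultFS would normally point at the NameNode of the test cluster.
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        Path tmp = new Path("/user/demo/.tmp/cluster.id");   // illustrative paths
        Path dst = new Path("/user/demo/cluster.id");

        // 1. Write the whole payload to the temporary location.
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write("32f19e65-b754-4d27-aa7c-bcc0d614ada8".getBytes(StandardCharsets.UTF_8));
        }
        // 2. Rename it into place; readers then see either the complete file
        //    or no file at all, never a partial write.
        if (!fs.rename(tmp, dst)) {
          throw new IOException("rename failed: " + tmp + " -> " + dst);
        }
      }
    }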
2024-11-11T20:46:35,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42863-0x1003089ce700000, quorum=127.0.0.1:58490, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:46:35,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34653-0x1003089ce700001, quorum=127.0.0.1:58490, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:46:35,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41687 is added to blk_1073741827_1003 (size=196) 2024-11-11T20:46:35,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43291 is added to blk_1073741827_1003 (size=196) 2024-11-11T20:46:35,646 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T20:46:35,646 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-11T20:46:35,647 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T20:46:35,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41687 is added to blk_1073741828_1004 (size=1189) 2024-11-11T20:46:35,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43291 is added to blk_1073741828_1004 (size=1189) 2024-11-11T20:46:35,654 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/MasterData/data/master/store 2024-11-11T20:46:35,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43291 is added to blk_1073741829_1005 (size=34) 2024-11-11T20:46:35,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41687 is added to blk_1073741829_1005 (size=34) 2024-11-11T20:46:35,660 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T20:46:35,660 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T20:46:35,660 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T20:46:35,660 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T20:46:35,660 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T20:46:35,660 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T20:46:35,660 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
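The master:store descriptor logged above enumerates per-family attributes (VERSIONS, BLOOMFILTER, IN_MEMORY, DATA_BLOCK_ENCODING, BLOCKSIZE, and so on). For reference, here is a sketch of how an equivalent descriptor can be assembled with the public HBase client builders; the table and family names are illustrative, not the internal master:store definition.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorSketch {
      public static TableDescriptor build() {
        // Mirrors the 'info' family attributes printed above: 3 versions,
        // ROWCOL bloom filter, in-memory, ROW_INDEX_V1 encoding, 8 KB blocks.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBlocksize(8 * 1024)
            .build();

        // A second family with the simpler settings shown for 'proc'/'rs'/'state'.
        ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .build();

        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo_store"))   // illustrative name
            .setColumnFamily(info)
            .setColumnFamily(proc)
            .build();
      }
    }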
2024-11-11T20:46:35,660 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731357995660Disabling compacts and flushes for region at 1731357995660Disabling writes for close at 1731357995660Writing region close event to WAL at 1731357995660Closed at 1731357995660 2024-11-11T20:46:35,661 WARN [master/51ca66f7ee3c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/MasterData/data/master/store/.initializing 2024-11-11T20:46:35,661 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/MasterData/WALs/51ca66f7ee3c,42863,1731357995536 2024-11-11T20:46:35,663 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=51ca66f7ee3c%2C42863%2C1731357995536, suffix=, logDir=hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/MasterData/WALs/51ca66f7ee3c,42863,1731357995536, archiveDir=hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/MasterData/oldWALs, maxLogs=10 2024-11-11T20:46:35,663 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 51ca66f7ee3c%2C42863%2C1731357995536.1731357995663 2024-11-11T20:46:35,668 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/MasterData/WALs/51ca66f7ee3c,42863,1731357995536/51ca66f7ee3c%2C42863%2C1731357995536.1731357995663 2024-11-11T20:46:35,668 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40017:40017),(127.0.0.1/127.0.0.1:37095:37095)] 2024-11-11T20:46:35,669 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-11T20:46:35,669 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T20:46:35,669 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:46:35,669 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:46:35,670 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:46:35,672 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-11T20:46:35,672 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:46:35,672 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:46:35,672 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:46:35,673 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-11T20:46:35,673 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:46:35,674 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T20:46:35,674 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:46:35,675 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-11T20:46:35,675 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:46:35,675 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T20:46:35,675 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:46:35,676 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-11T20:46:35,676 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:46:35,677 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T20:46:35,677 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:46:35,677 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:46:35,678 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:46:35,679 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:46:35,679 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:46:35,680 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-11T20:46:35,681 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:46:35,683 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T20:46:35,684 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=816152, jitterRate=0.03779187798500061}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-11T20:46:35,685 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731357995669Initializing all the Stores at 1731357995670 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731357995670Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731357995670Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731357995670Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731357995670Cleaning up temporary data from old regions at 1731357995679 (+9 ms)Region opened successfully at 1731357995685 (+6 ms) 2024-11-11T20:46:35,685 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-11T20:46:35,688 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51829c5c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=51ca66f7ee3c/172.17.0.2:0 2024-11-11T20:46:35,689 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-11T20:46:35,689 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-11T20:46:35,689 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-11T20:46:35,689 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-11T20:46:35,690 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-11T20:46:35,690 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-11T20:46:35,690 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-11T20:46:35,692 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-11T20:46:35,693 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42863-0x1003089ce700000, quorum=127.0.0.1:58490, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-11T20:46:35,694 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-11T20:46:35,694 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-11T20:46:35,695 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42863-0x1003089ce700000, quorum=127.0.0.1:58490, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-11T20:46:35,696 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-11T20:46:35,696 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-11T20:46:35,697 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42863-0x1003089ce700000, quorum=127.0.0.1:58490, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-11T20:46:35,698 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-11T20:46:35,698 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42863-0x1003089ce700000, quorum=127.0.0.1:58490, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-11T20:46:35,699 DEBUG 
[master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-11T20:46:35,701 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42863-0x1003089ce700000, quorum=127.0.0.1:58490, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-11T20:46:35,702 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-11T20:46:35,703 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42863-0x1003089ce700000, quorum=127.0.0.1:58490, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T20:46:35,703 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34653-0x1003089ce700001, quorum=127.0.0.1:58490, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T20:46:35,703 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34653-0x1003089ce700001, quorum=127.0.0.1:58490, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:46:35,703 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42863-0x1003089ce700000, quorum=127.0.0.1:58490, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:46:35,703 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=51ca66f7ee3c,42863,1731357995536, sessionid=0x1003089ce700000, setting cluster-up flag (Was=false) 2024-11-11T20:46:35,705 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34653-0x1003089ce700001, quorum=127.0.0.1:58490, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:46:35,705 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42863-0x1003089ce700000, quorum=127.0.0.1:58490, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:46:35,708 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-11T20:46:35,709 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=51ca66f7ee3c,42863,1731357995536 2024-11-11T20:46:35,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34653-0x1003089ce700001, quorum=127.0.0.1:58490, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:46:35,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42863-0x1003089ce700000, quorum=127.0.0.1:58490, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:46:35,714 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-11T20:46:35,715 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=51ca66f7ee3c,42863,1731357995536 2024-11-11T20:46:35,717 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-11T20:46:35,719 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-11T20:46:35,719 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-11T20:46:35,719 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-11T20:46:35,720 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 51ca66f7ee3c,42863,1731357995536 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-11T20:46:35,721 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/51ca66f7ee3c:0, corePoolSize=5, maxPoolSize=5 2024-11-11T20:46:35,721 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/51ca66f7ee3c:0, corePoolSize=5, maxPoolSize=5 2024-11-11T20:46:35,721 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/51ca66f7ee3c:0, corePoolSize=5, maxPoolSize=5 2024-11-11T20:46:35,721 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/51ca66f7ee3c:0, corePoolSize=5, maxPoolSize=5 2024-11-11T20:46:35,721 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/51ca66f7ee3c:0, corePoolSize=10, maxPoolSize=10 2024-11-11T20:46:35,722 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:46:35,722 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/51ca66f7ee3c:0, corePoolSize=2, maxPoolSize=2 2024-11-11T20:46:35,722 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/51ca66f7ee3c:0, corePoolSize=1, 
maxPoolSize=1 2024-11-11T20:46:35,723 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731358025723 2024-11-11T20:46:35,724 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-11T20:46:35,724 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T20:46:35,724 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-11T20:46:35,724 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-11T20:46:35,724 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-11T20:46:35,724 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-11T20:46:35,724 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-11T20:46:35,724 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-11T20:46:35,724 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
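The ChoreService lines above register recurring maintenance chores (LogsCleaner every 600000 ms, CompletedProcedureCleaner on a 30000 ms timeout, and others). As a generic illustration of the periodic-chore idea only, using the JDK scheduler rather than HBase's own ChoreService/ScheduledChore API:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreSketch {
      public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService pool = Executors.newSingleThreadScheduledExecutor();

        // Illustrative stand-in for a log-cleaner chore; the 600000 ms period
        // matches the LogsCleaner schedule printed above.
        Runnable logsCleaner =
            () -> System.out.println("scanning oldWALs for deletable files");
        pool.scheduleAtFixedRate(logsCleaner, 0, 600_000, TimeUnit.MILLISECONDS);

        TimeUnit.SECONDS.sleep(2);  // let the chore fire once for the demo
        pool.shutdownNow();         // a real service keeps it running until stopped
      }
    }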
2024-11-11T20:46:35,725 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:46:35,725 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-11T20:46:35,725 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-11T20:46:35,725 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-11T20:46:35,725 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-11T20:46:35,726 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-11T20:46:35,726 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-11T20:46:35,726 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/51ca66f7ee3c:0:becomeActiveMaster-HFileCleaner.large.0-1731357995726,5,FailOnTimeoutGroup] 2024-11-11T20:46:35,726 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/51ca66f7ee3c:0:becomeActiveMaster-HFileCleaner.small.0-1731357995726,5,FailOnTimeoutGroup] 2024-11-11T20:46:35,726 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T20:46:35,726 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-11T20:46:35,726 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-11T20:46:35,726 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-11T20:46:35,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41687 is added to blk_1073741831_1007 (size=1321) 2024-11-11T20:46:35,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43291 is added to blk_1073741831_1007 (size=1321) 2024-11-11T20:46:35,732 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-11T20:46:35,732 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d 2024-11-11T20:46:35,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43291 is added to blk_1073741832_1008 (size=32) 2024-11-11T20:46:35,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41687 is added to blk_1073741832_1008 (size=32) 2024-11-11T20:46:35,738 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T20:46:35,739 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T20:46:35,740 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T20:46:35,740 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:46:35,740 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:46:35,740 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T20:46:35,742 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T20:46:35,742 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:46:35,742 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:46:35,742 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T20:46:35,743 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T20:46:35,743 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:46:35,744 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:46:35,744 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T20:46:35,745 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T20:46:35,745 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:46:35,746 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:46:35,746 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T20:46:35,747 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/hbase/meta/1588230740 2024-11-11T20:46:35,747 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/hbase/meta/1588230740 2024-11-11T20:46:35,749 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T20:46:35,749 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T20:46:35,750 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-11T20:46:35,751 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T20:46:35,754 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T20:46:35,754 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=718783, jitterRate=-0.08602108061313629}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T20:46:35,755 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731357995738Initializing all the Stores at 1731357995738Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731357995738Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731357995739 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731357995739Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731357995739Cleaning up temporary data from old regions at 1731357995749 (+10 ms)Region opened successfully at 1731357995755 (+6 ms) 2024-11-11T20:46:35,756 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T20:46:35,756 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T20:46:35,756 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T20:46:35,756 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T20:46:35,756 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T20:46:35,756 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T20:46:35,756 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731357995755Disabling compacts and flushes for region at 
1731357995755Disabling writes for close at 1731357995756 (+1 ms)Writing region close event to WAL at 1731357995756Closed at 1731357995756 2024-11-11T20:46:35,758 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T20:46:35,758 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-11T20:46:35,758 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-11T20:46:35,760 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T20:46:35,761 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-11T20:46:35,800 INFO [RS:0;51ca66f7ee3c:34653 {}] regionserver.HRegionServer(746): ClusterId : 32f19e65-b754-4d27-aa7c-bcc0d614ada8 2024-11-11T20:46:35,800 DEBUG [RS:0;51ca66f7ee3c:34653 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-11T20:46:35,803 DEBUG [RS:0;51ca66f7ee3c:34653 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-11T20:46:35,803 DEBUG [RS:0;51ca66f7ee3c:34653 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-11T20:46:35,805 DEBUG [RS:0;51ca66f7ee3c:34653 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-11T20:46:35,806 DEBUG [RS:0;51ca66f7ee3c:34653 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@317e39e6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=51ca66f7ee3c/172.17.0.2:0 2024-11-11T20:46:35,820 DEBUG [RS:0;51ca66f7ee3c:34653 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;51ca66f7ee3c:34653 2024-11-11T20:46:35,820 INFO [RS:0;51ca66f7ee3c:34653 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-11T20:46:35,820 INFO [RS:0;51ca66f7ee3c:34653 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-11T20:46:35,820 DEBUG [RS:0;51ca66f7ee3c:34653 {}] regionserver.HRegionServer(832): About to register with Master. 
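The FlushLargeStoresPolicy line above falls back to memstore-flush-size divided by the number of families (16.0 M, i.e. the flushSizeLowerBound=16777216 printed with the split policy) because hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the hbase:meta table descriptor. A minimal sketch of supplying that bound through a table descriptor with the standard HBase client builder API; the table and family names here are illustrative, not taken from this log:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class PerFamilyFlushBoundSketch {
      public static void main(String[] args) {
        // Hypothetical table; the property name is copied from the
        // FlushLargeStoresPolicy log line above.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            // lower bound (bytes) below which a per-column-family flush is not attempted
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                String.valueOf(16 * 1024 * 1024))
            .build();
        System.out.println(td);
      }
    }

With the bound set explicitly, the policy flushes only those stores whose memstore exceeds it instead of deriving the threshold from the region flush size.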
2024-11-11T20:46:35,821 INFO [RS:0;51ca66f7ee3c:34653 {}] regionserver.HRegionServer(2659): reportForDuty to master=51ca66f7ee3c,42863,1731357995536 with port=34653, startcode=1731357995580 2024-11-11T20:46:35,821 DEBUG [RS:0;51ca66f7ee3c:34653 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-11T20:46:35,823 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38227, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-11T20:46:35,823 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42863 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 51ca66f7ee3c,34653,1731357995580 2024-11-11T20:46:35,823 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42863 {}] master.ServerManager(517): Registering regionserver=51ca66f7ee3c,34653,1731357995580 2024-11-11T20:46:35,825 DEBUG [RS:0;51ca66f7ee3c:34653 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d 2024-11-11T20:46:35,825 DEBUG [RS:0;51ca66f7ee3c:34653 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39195 2024-11-11T20:46:35,825 DEBUG [RS:0;51ca66f7ee3c:34653 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-11T20:46:35,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42863-0x1003089ce700000, quorum=127.0.0.1:58490, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T20:46:35,826 DEBUG [RS:0;51ca66f7ee3c:34653 {}] zookeeper.ZKUtil(111): regionserver:34653-0x1003089ce700001, quorum=127.0.0.1:58490, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/51ca66f7ee3c,34653,1731357995580 2024-11-11T20:46:35,826 WARN [RS:0;51ca66f7ee3c:34653 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-11T20:46:35,826 INFO [RS:0;51ca66f7ee3c:34653 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T20:46:35,826 DEBUG [RS:0;51ca66f7ee3c:34653 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/WALs/51ca66f7ee3c,34653,1731357995580 2024-11-11T20:46:35,827 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [51ca66f7ee3c,34653,1731357995580] 2024-11-11T20:46:35,829 INFO [RS:0;51ca66f7ee3c:34653 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-11T20:46:35,830 INFO [RS:0;51ca66f7ee3c:34653 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-11T20:46:35,831 INFO [RS:0;51ca66f7ee3c:34653 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T20:46:35,831 INFO [RS:0;51ca66f7ee3c:34653 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
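The PressureAwareCompactionThroughputController line above reports a 100 MB/s upper bound, a 50 MB/s lower bound and a 60000 ms tuning period. A hedged sketch of setting the same bounds programmatically; the property keys are the usual throughput-controller keys and are an assumption here, since the log prints only the resolved values:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Mirror the bounds reported above (100 MB/s upper, 50 MB/s lower, 60 s tune period).
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        conf.setInt("hbase.hstore.compaction.throughput.tune.period", 60_000);
        System.out.println(conf.get("hbase.hstore.compaction.throughput.higher.bound"));
      }
    }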
2024-11-11T20:46:35,831 INFO [RS:0;51ca66f7ee3c:34653 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-11T20:46:35,831 INFO [RS:0;51ca66f7ee3c:34653 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-11T20:46:35,832 INFO [RS:0;51ca66f7ee3c:34653 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-11T20:46:35,832 DEBUG [RS:0;51ca66f7ee3c:34653 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:46:35,832 DEBUG [RS:0;51ca66f7ee3c:34653 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:46:35,832 DEBUG [RS:0;51ca66f7ee3c:34653 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:46:35,832 DEBUG [RS:0;51ca66f7ee3c:34653 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:46:35,832 DEBUG [RS:0;51ca66f7ee3c:34653 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:46:35,832 DEBUG [RS:0;51ca66f7ee3c:34653 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/51ca66f7ee3c:0, corePoolSize=2, maxPoolSize=2 2024-11-11T20:46:35,832 DEBUG [RS:0;51ca66f7ee3c:34653 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:46:35,832 DEBUG [RS:0;51ca66f7ee3c:34653 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:46:35,832 DEBUG [RS:0;51ca66f7ee3c:34653 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:46:35,832 DEBUG [RS:0;51ca66f7ee3c:34653 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:46:35,832 DEBUG [RS:0;51ca66f7ee3c:34653 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:46:35,832 DEBUG [RS:0;51ca66f7ee3c:34653 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:46:35,832 DEBUG [RS:0;51ca66f7ee3c:34653 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/51ca66f7ee3c:0, corePoolSize=3, maxPoolSize=3 2024-11-11T20:46:35,832 DEBUG [RS:0;51ca66f7ee3c:34653 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/51ca66f7ee3c:0, corePoolSize=3, maxPoolSize=3 2024-11-11T20:46:35,833 INFO [RS:0;51ca66f7ee3c:34653 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
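The repeated "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled" lines come from ChoreService scheduling periodic background tasks. A rough, self-contained sketch of the same pattern with the public ScheduledChore/ChoreService API; the chore name, period and stopper below are illustrative:

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) throws InterruptedException {
        // Simple Stoppable so the chore has something to check for shutdown.
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped = false;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ChoreService service = new ChoreService("demo");
        // name, stopper, period in milliseconds - mirrors the
        // "name=..., period=1000, unit=MILLISECONDS is enabled" lines above
        ScheduledChore chore = new ScheduledChore("DemoChore", stopper, 1000) {
          @Override protected void chore() { System.out.println("chore tick"); }
        };
        service.scheduleChore(chore);
        Thread.sleep(3_000);
        stopper.stop("done");
        service.shutdown();
      }
    }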
2024-11-11T20:46:35,833 INFO [RS:0;51ca66f7ee3c:34653 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T20:46:35,833 INFO [RS:0;51ca66f7ee3c:34653 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T20:46:35,833 INFO [RS:0;51ca66f7ee3c:34653 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-11T20:46:35,833 INFO [RS:0;51ca66f7ee3c:34653 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T20:46:35,833 INFO [RS:0;51ca66f7ee3c:34653 {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,34653,1731357995580-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T20:46:35,848 INFO [RS:0;51ca66f7ee3c:34653 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T20:46:35,848 INFO [RS:0;51ca66f7ee3c:34653 {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,34653,1731357995580-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T20:46:35,848 INFO [RS:0;51ca66f7ee3c:34653 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T20:46:35,848 INFO [RS:0;51ca66f7ee3c:34653 {}] regionserver.Replication(171): 51ca66f7ee3c,34653,1731357995580 started 2024-11-11T20:46:35,860 INFO [RS:0;51ca66f7ee3c:34653 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T20:46:35,860 INFO [RS:0;51ca66f7ee3c:34653 {}] regionserver.HRegionServer(1482): Serving as 51ca66f7ee3c,34653,1731357995580, RpcServer on 51ca66f7ee3c/172.17.0.2:34653, sessionid=0x1003089ce700001 2024-11-11T20:46:35,860 DEBUG [RS:0;51ca66f7ee3c:34653 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T20:46:35,860 DEBUG [RS:0;51ca66f7ee3c:34653 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 51ca66f7ee3c,34653,1731357995580 2024-11-11T20:46:35,861 DEBUG [RS:0;51ca66f7ee3c:34653 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '51ca66f7ee3c,34653,1731357995580' 2024-11-11T20:46:35,861 DEBUG [RS:0;51ca66f7ee3c:34653 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T20:46:35,861 DEBUG [RS:0;51ca66f7ee3c:34653 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T20:46:35,862 DEBUG [RS:0;51ca66f7ee3c:34653 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T20:46:35,862 DEBUG [RS:0;51ca66f7ee3c:34653 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T20:46:35,862 DEBUG [RS:0;51ca66f7ee3c:34653 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 51ca66f7ee3c,34653,1731357995580 2024-11-11T20:46:35,862 DEBUG [RS:0;51ca66f7ee3c:34653 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '51ca66f7ee3c,34653,1731357995580' 2024-11-11T20:46:35,862 DEBUG [RS:0;51ca66f7ee3c:34653 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-11T20:46:35,862 DEBUG 
[RS:0;51ca66f7ee3c:34653 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-11-11T20:46:35,863 DEBUG [RS:0;51ca66f7ee3c:34653 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-11-11T20:46:35,863 INFO [RS:0;51ca66f7ee3c:34653 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-11-11T20:46:35,863 INFO [RS:0;51ca66f7ee3c:34653 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-11-11T20:46:35,878 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-11T20:46:35,883 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-11T20:46:35,911 WARN [51ca66f7ee3c:42863 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions.
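The two "Failed invocation" warnings above are RecoverLeaseFSUtils reflectively polling isFileClosed on WALs left over from an earlier mini-cluster whose DFS client has already been shut down, hence the "Filesystem closed" cause. A stripped-down sketch of the underlying HDFS calls it wraps (recoverLease plus an isFileClosed poll); the path argument is illustrative and this is not the HBase utility itself:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoverySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path wal = new Path(args[0]);            // e.g. an old file under .../WALs/...
        FileSystem fs = wal.getFileSystem(conf);
        if (fs instanceof DistributedFileSystem) {
          DistributedFileSystem dfs = (DistributedFileSystem) fs;
          // Ask the NameNode to start lease recovery; true once the file is closed.
          boolean closed = dfs.recoverLease(wal);
          while (!closed) {
            Thread.sleep(1000);
            // The same check RecoverLeaseFSUtils performs reflectively in the stack
            // traces above; it throws "Filesystem closed" if the client is shut down.
            closed = dfs.isFileClosed(wal);
          }
          System.out.println("lease recovered for " + wal);
        }
      }
    }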
2024-11-11T20:46:35,965 INFO [RS:0;51ca66f7ee3c:34653 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=51ca66f7ee3c%2C34653%2C1731357995580, suffix=, logDir=hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/WALs/51ca66f7ee3c,34653,1731357995580, archiveDir=hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/oldWALs, maxLogs=32 2024-11-11T20:46:35,965 INFO [RS:0;51ca66f7ee3c:34653 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 51ca66f7ee3c%2C34653%2C1731357995580.1731357995965 2024-11-11T20:46:35,971 INFO [RS:0;51ca66f7ee3c:34653 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/WALs/51ca66f7ee3c,34653,1731357995580/51ca66f7ee3c%2C34653%2C1731357995580.1731357995965 2024-11-11T20:46:35,971 DEBUG [RS:0;51ca66f7ee3c:34653 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37095:37095),(127.0.0.1/127.0.0.1:40017:40017)] 2024-11-11T20:46:36,162 DEBUG [51ca66f7ee3c:42863 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-11T20:46:36,162 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=51ca66f7ee3c,34653,1731357995580 2024-11-11T20:46:36,165 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 51ca66f7ee3c,34653,1731357995580, state=OPENING 2024-11-11T20:46:36,168 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-11T20:46:36,171 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42863-0x1003089ce700000, quorum=127.0.0.1:58490, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:46:36,171 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34653-0x1003089ce700001, quorum=127.0.0.1:58490, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:46:36,172 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T20:46:36,172 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T20:46:36,172 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T20:46:36,172 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=51ca66f7ee3c,34653,1731357995580}] 2024-11-11T20:46:36,326 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-11T20:46:36,329 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60853, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-11T20:46:36,336 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-11T20:46:36,336 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T20:46:36,340 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=51ca66f7ee3c%2C34653%2C1731357995580.meta, suffix=.meta, logDir=hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/WALs/51ca66f7ee3c,34653,1731357995580, archiveDir=hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/oldWALs, maxLogs=32 2024-11-11T20:46:36,340 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 51ca66f7ee3c%2C34653%2C1731357995580.meta.1731357996340.meta 2024-11-11T20:46:36,347 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/WALs/51ca66f7ee3c,34653,1731357995580/51ca66f7ee3c%2C34653%2C1731357995580.meta.1731357996340.meta 2024-11-11T20:46:36,348 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40017:40017),(127.0.0.1/127.0.0.1:37095:37095)] 2024-11-11T20:46:36,349 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-11T20:46:36,350 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-11T20:46:36,350 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-11T20:46:36,350 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
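The "WAL configuration: blocksize=256 MB, rollsize=128 MB, ..., maxLogs=32" lines above reflect the WAL block size, a 0.5 roll multiplier (128 MB is half of the 256 MB block size) and the maximum number of retained WAL files. A sketch of the configuration keys those values usually come from; the key names are assumptions, as the log prints only the resolved sizes:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalRollSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // blocksize
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);          // rollsize = blocksize * multiplier
        conf.setInt("hbase.regionserver.maxlogs", 32);                          // maxLogs
        long rollSize = (long) (conf.getLong("hbase.regionserver.hlog.blocksize", 0)
            * conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f));
        System.out.println("roll size = " + rollSize);
      }
    }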
2024-11-11T20:46:36,350 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-11T20:46:36,350 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T20:46:36,350 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-11T20:46:36,350 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-11T20:46:36,352 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T20:46:36,353 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T20:46:36,353 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:46:36,354 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:46:36,354 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T20:46:36,355 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T20:46:36,355 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:46:36,355 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:46:36,355 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T20:46:36,356 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T20:46:36,356 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:46:36,356 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:46:36,356 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T20:46:36,357 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T20:46:36,357 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:46:36,357 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
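The CompactionConfiguration lines for the meta column families above report minCompactSize 128 MB, 3-10 files per compaction, ratio 1.2 (5.0 off-peak), a 2684354560-byte throttle point, and a 7-day major-compaction period with 0.5 jitter. A sketch of the hbase-site.xml keys those values conventionally map to, set programmatically; the key names are assumptions rather than something printed in this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);      // minCompactSize
        conf.setInt("hbase.hstore.compaction.min", 3);                              // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);                             // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        conf.setLong("hbase.regionserver.thread.compaction.throttle", 2684354560L); // throttle point
        conf.setLong("hbase.hregion.majorcompaction", 604_800_000L);                // 7 days in ms
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
        System.out.println(conf.get("hbase.hstore.compaction.ratio"));
      }
    }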
2024-11-11T20:46:36,357 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T20:46:36,358 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/hbase/meta/1588230740 2024-11-11T20:46:36,359 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/hbase/meta/1588230740 2024-11-11T20:46:36,360 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T20:46:36,360 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T20:46:36,360 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-11T20:46:36,362 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T20:46:36,362 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=824408, jitterRate=0.0482894629240036}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T20:46:36,362 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-11T20:46:36,363 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731357996351Writing region info on filesystem at 1731357996351Initializing all the Stores at 1731357996351Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731357996352 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731357996352Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731357996352Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731357996352Cleaning up temporary data from old regions at 1731357996360 (+8 ms)Running coprocessor post-open hooks at 1731357996362 (+2 ms)Region opened successfully at 1731357996363 (+1 ms) 2024-11-11T20:46:36,364 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731357996325 2024-11-11T20:46:36,366 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-11T20:46:36,366 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-11T20:46:36,367 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=51ca66f7ee3c,34653,1731357995580 2024-11-11T20:46:36,368 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 51ca66f7ee3c,34653,1731357995580, state=OPEN 2024-11-11T20:46:36,370 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42863-0x1003089ce700000, quorum=127.0.0.1:58490, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T20:46:36,370 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34653-0x1003089ce700001, quorum=127.0.0.1:58490, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T20:46:36,370 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=51ca66f7ee3c,34653,1731357995580 2024-11-11T20:46:36,370 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T20:46:36,370 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T20:46:36,373 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-11T20:46:36,373 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=51ca66f7ee3c,34653,1731357995580 in 198 msec 2024-11-11T20:46:36,376 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-11T20:46:36,376 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 615 msec 2024-11-11T20:46:36,377 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T20:46:36,377 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-11T20:46:36,378 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T20:46:36,379 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=51ca66f7ee3c,34653,1731357995580, seqNum=-1] 2024-11-11T20:46:36,379 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T20:46:36,380 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52487, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T20:46:36,386 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 667 msec 2024-11-11T20:46:36,386 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731357996386, completionTime=-1 2024-11-11T20:46:36,386 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-11T20:46:36,386 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-11T20:46:36,388 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-11T20:46:36,388 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731358056388 2024-11-11T20:46:36,388 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731358116388 2024-11-11T20:46:36,388 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-11T20:46:36,389 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,42863,1731357995536-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T20:46:36,389 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,42863,1731357995536-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T20:46:36,389 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,42863,1731357995536-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T20:46:36,389 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-51ca66f7ee3c:42863, period=300000, unit=MILLISECONDS is enabled. 
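The ConnectionUtils lines above show the procedure worker fetching the hbase:meta region location from the connection registry before creating the default and hbase namespaces. A small client-side sketch that resolves the same location through the public RegionLocator API; the connection settings come from whatever hbase-site.xml is on the classpath:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class MetaLocationSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
          // Resolves hbase:meta,,1 through the connection registry, much like the
          // "fetched meta region location" lines above.
          HRegionLocation loc = locator.getRegionLocation(new byte[0]);
          System.out.println(loc);
        }
      }
    }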
2024-11-11T20:46:36,389 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-11T20:46:36,389 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-11T20:46:36,391 DEBUG [master/51ca66f7ee3c:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-11T20:46:36,393 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.781sec 2024-11-11T20:46:36,393 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-11T20:46:36,393 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-11T20:46:36,393 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-11T20:46:36,393 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-11T20:46:36,393 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-11T20:46:36,393 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,42863,1731357995536-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T20:46:36,393 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,42863,1731357995536-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-11T20:46:36,396 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-11T20:46:36,396 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-11T20:46:36,396 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,42863,1731357995536-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-11T20:46:36,400 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@30873421, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T20:46:36,400 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 51ca66f7ee3c,42863,-1 for getting cluster id 2024-11-11T20:46:36,400 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-11T20:46:36,402 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '32f19e65-b754-4d27-aa7c-bcc0d614ada8' 2024-11-11T20:46:36,402 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-11T20:46:36,402 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "32f19e65-b754-4d27-aa7c-bcc0d614ada8" 2024-11-11T20:46:36,402 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ff92123, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T20:46:36,403 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [51ca66f7ee3c,42863,-1] 2024-11-11T20:46:36,403 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-11T20:46:36,403 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T20:46:36,404 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53248, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-11T20:46:36,405 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36f52998, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T20:46:36,405 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T20:46:36,406 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=51ca66f7ee3c,34653,1731357995580, seqNum=-1] 2024-11-11T20:46:36,407 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T20:46:36,408 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36986, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T20:46:36,409 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=51ca66f7ee3c,42863,1731357995536 2024-11-11T20:46:36,409 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using 
class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:46:36,412 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-11T20:46:36,412 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-11T20:46:36,413 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 51ca66f7ee3c,42863,1731357995536 2024-11-11T20:46:36,414 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@141511ac 2024-11-11T20:46:36,414 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-11T20:46:36,415 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53264, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-11T20:46:36,415 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42863 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-11T20:46:36,416 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42863 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-11T20:46:36,416 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42863 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T20:46:36,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42863 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-11T20:46:36,418 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-11T20:46:36,418 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:46:36,419 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42863 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-11T20:46:36,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42863 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T20:46:36,420 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-11T20:46:36,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41687 is added to blk_1073741835_1011 (size=405) 2024-11-11T20:46:36,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43291 is added to blk_1073741835_1011 (size=405) 2024-11-11T20:46:36,429 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => c43a6f7fd29b402ac6cd642b17dbc124, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731357996415.c43a6f7fd29b402ac6cd642b17dbc124.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d 2024-11-11T20:46:36,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41687 is added to blk_1073741836_1012 (size=88) 2024-11-11T20:46:36,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43291 is added to blk_1073741836_1012 (size=88) 2024-11-11T20:46:36,436 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731357996415.c43a6f7fd29b402ac6cd642b17dbc124.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T20:46:36,436 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing c43a6f7fd29b402ac6cd642b17dbc124, disabling compactions & flushes 2024-11-11T20:46:36,436 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731357996415.c43a6f7fd29b402ac6cd642b17dbc124. 2024-11-11T20:46:36,436 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731357996415.c43a6f7fd29b402ac6cd642b17dbc124. 2024-11-11T20:46:36,436 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731357996415.c43a6f7fd29b402ac6cd642b17dbc124. after waiting 0 ms 2024-11-11T20:46:36,436 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731357996415.c43a6f7fd29b402ac6cd642b17dbc124. 
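The create request above (table TestLogRolling-testCompactionRecordDoesntBlockRolling with a single 'info' family, one version, ROW bloom filter, 64 KB blocks) trips the TableDescriptorChecker warnings because the test deliberately uses a tiny max file size (786432) and memstore flush size (8192). A hedged Admin-API sketch of an equivalent creation; whether the test sets those two values on the descriptor or in hbase-site.xml is not visible here, so the descriptor form below is an assumption:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestTableSketch {
      public static void main(String[] args) throws Exception {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"))
            .setMaxFileSize(786432)        // triggers the MAX_FILESIZE "too small" warning above
            .setMemStoreFlushSize(8192)    // triggers the MEMSTORE_FLUSHSIZE "too small" warning above
            .setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(1)
                .setBloomFilterType(BloomType.ROW)
                .setBlocksize(64 * 1024)
                .build())
            .build();
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.createTable(td);
        }
      }
    }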
2024-11-11T20:46:36,436 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731357996415.c43a6f7fd29b402ac6cd642b17dbc124. 2024-11-11T20:46:36,436 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for c43a6f7fd29b402ac6cd642b17dbc124: Waiting for close lock at 1731357996436Disabling compacts and flushes for region at 1731357996436Disabling writes for close at 1731357996436Writing region close event to WAL at 1731357996436Closed at 1731357996436 2024-11-11T20:46:36,438 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-11T20:46:36,438 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731357996415.c43a6f7fd29b402ac6cd642b17dbc124.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1731357996438"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731357996438"}]},"ts":"1731357996438"} 2024-11-11T20:46:36,440 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-11T20:46:36,441 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-11T20:46:36,442 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731357996441"}]},"ts":"1731357996441"} 2024-11-11T20:46:36,444 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-11T20:46:36,444 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=c43a6f7fd29b402ac6cd642b17dbc124, ASSIGN}] 2024-11-11T20:46:36,445 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=c43a6f7fd29b402ac6cd642b17dbc124, ASSIGN 2024-11-11T20:46:36,447 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=c43a6f7fd29b402ac6cd642b17dbc124, ASSIGN; state=OFFLINE, location=51ca66f7ee3c,34653,1731357995580; forceNewPlan=false, retain=false 2024-11-11T20:46:36,598 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=c43a6f7fd29b402ac6cd642b17dbc124, regionState=OPENING, regionLocation=51ca66f7ee3c,34653,1731357995580 2024-11-11T20:46:36,601 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] 
procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=c43a6f7fd29b402ac6cd642b17dbc124, ASSIGN because future has completed 2024-11-11T20:46:36,602 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure c43a6f7fd29b402ac6cd642b17dbc124, server=51ca66f7ee3c,34653,1731357995580}] 2024-11-11T20:46:36,766 INFO [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731357996415.c43a6f7fd29b402ac6cd642b17dbc124. 2024-11-11T20:46:36,766 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => c43a6f7fd29b402ac6cd642b17dbc124, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731357996415.c43a6f7fd29b402ac6cd642b17dbc124.', STARTKEY => '', ENDKEY => ''} 2024-11-11T20:46:36,766 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling c43a6f7fd29b402ac6cd642b17dbc124 2024-11-11T20:46:36,767 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731357996415.c43a6f7fd29b402ac6cd642b17dbc124.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T20:46:36,767 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for c43a6f7fd29b402ac6cd642b17dbc124 2024-11-11T20:46:36,767 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for c43a6f7fd29b402ac6cd642b17dbc124 2024-11-11T20:46:36,769 INFO [StoreOpener-c43a6f7fd29b402ac6cd642b17dbc124-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region c43a6f7fd29b402ac6cd642b17dbc124 2024-11-11T20:46:36,770 INFO [StoreOpener-c43a6f7fd29b402ac6cd642b17dbc124-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c43a6f7fd29b402ac6cd642b17dbc124 columnFamilyName info 2024-11-11T20:46:36,770 DEBUG [StoreOpener-c43a6f7fd29b402ac6cd642b17dbc124-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker 
impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:46:36,771 INFO [StoreOpener-c43a6f7fd29b402ac6cd642b17dbc124-1 {}] regionserver.HStore(327): Store=c43a6f7fd29b402ac6cd642b17dbc124/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T20:46:36,771 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for c43a6f7fd29b402ac6cd642b17dbc124 2024-11-11T20:46:36,772 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c43a6f7fd29b402ac6cd642b17dbc124 2024-11-11T20:46:36,772 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c43a6f7fd29b402ac6cd642b17dbc124 2024-11-11T20:46:36,772 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for c43a6f7fd29b402ac6cd642b17dbc124 2024-11-11T20:46:36,773 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for c43a6f7fd29b402ac6cd642b17dbc124 2024-11-11T20:46:36,774 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for c43a6f7fd29b402ac6cd642b17dbc124 2024-11-11T20:46:36,776 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c43a6f7fd29b402ac6cd642b17dbc124/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T20:46:36,776 INFO [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened c43a6f7fd29b402ac6cd642b17dbc124; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=776243, jitterRate=-0.012956470251083374}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-11T20:46:36,777 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c43a6f7fd29b402ac6cd642b17dbc124 2024-11-11T20:46:36,777 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for c43a6f7fd29b402ac6cd642b17dbc124: Running coprocessor pre-open hook at 1731357996767Writing region info on filesystem at 1731357996767Initializing all the Stores at 1731357996768 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731357996768Cleaning up temporary data from old regions at 1731357996773 (+5 ms)Running coprocessor post-open hooks at 1731357996777 (+4 ms)Region opened successfully at 1731357996777 2024-11-11T20:46:36,778 INFO [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731357996415.c43a6f7fd29b402ac6cd642b17dbc124., pid=6, masterSystemTime=1731357996757 2024-11-11T20:46:36,780 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731357996415.c43a6f7fd29b402ac6cd642b17dbc124. 2024-11-11T20:46:36,780 INFO [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731357996415.c43a6f7fd29b402ac6cd642b17dbc124. 2024-11-11T20:46:36,781 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=c43a6f7fd29b402ac6cd642b17dbc124, regionState=OPEN, openSeqNum=2, regionLocation=51ca66f7ee3c,34653,1731357995580 2024-11-11T20:46:36,784 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure c43a6f7fd29b402ac6cd642b17dbc124, server=51ca66f7ee3c,34653,1731357995580 because future has completed 2024-11-11T20:46:36,789 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-11T20:46:36,789 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure c43a6f7fd29b402ac6cd642b17dbc124, server=51ca66f7ee3c,34653,1731357995580 in 183 msec 2024-11-11T20:46:36,792 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-11T20:46:36,792 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=c43a6f7fd29b402ac6cd642b17dbc124, ASSIGN in 345 msec 2024-11-11T20:46:36,793 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-11T20:46:36,794 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731357996793"}]},"ts":"1731357996793"} 2024-11-11T20:46:36,796 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-11T20:46:36,797 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-11T20:46:36,799 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, 
state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 381 msec 2024-11-11T20:46:36,830 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-11T20:46:36,830 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-11-11T20:46:36,831 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-11T20:46:36,878 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T20:46:36,884 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:37,879 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:37,885 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T20:46:38,881 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:38,886 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:39,882 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T20:46:39,887 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:40,883 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:40,888 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T20:46:41,855 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-11T20:46:41,857 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:41,857 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:41,858 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:41,858 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:41,859 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:41,859 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:41,879 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:41,879 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:41,879 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:41,879 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:41,879 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:41,880 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:41,883 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:41,883 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:41,883 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:41,884 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:41,886 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T20:46:41,888 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:41,891 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-11T20:46:41,891 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-11T20:46:42,884 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:42,889 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:43,885 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:43,890 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-11T20:46:44,886 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
[The same WARN, with an identical InvocationTargetException caused by java.io.IOException: Filesystem closed, is logged again at 2024-11-11T20:46:44,891 and 20:46:45,892 for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta, and at 20:46:45,886 for the WAL file above.]
2024-11-11T20:46:46,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42863 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-11T20:46:46,429 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-11T20:46:46,429 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, for max=2147483647 with caching=100
2024-11-11T20:46:46,438 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-11T20:46:46,438 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731357996415.c43a6f7fd29b402ac6cd642b17dbc124.
2024-11-11T20:46:46,441 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731357996415.c43a6f7fd29b402ac6cd642b17dbc124., hostname=51ca66f7ee3c,34653,1731357995580, seqNum=2]
2024-11-11T20:46:46,446 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42863 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-11T20:46:46,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42863 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-11T20:46:46,452 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-11T20:46:46,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42863 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-11T20:46:46,453 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-11T20:46:46,455 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-11T20:46:46,614 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34653 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8
2024-11-11T20:46:46,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/51ca66f7ee3c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731357996415.c43a6f7fd29b402ac6cd642b17dbc124.
2024-11-11T20:46:46,615 INFO [RS_FLUSH_OPERATIONS-regionserver/51ca66f7ee3c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing c43a6f7fd29b402ac6cd642b17dbc124 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-11T20:46:46,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/51ca66f7ee3c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c43a6f7fd29b402ac6cd642b17dbc124/.tmp/info/0ad15d1bf8594161a8a37482906456b9 is 1080, key is row0001/info:/1731358006442/Put/seqid=0
2024-11-11T20:46:46,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43291 is added to blk_1073741837_1013 (size=6033)
2024-11-11T20:46:46,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41687 is added to blk_1073741837_1013 (size=6033)
2024-11-11T20:46:46,637 INFO [RS_FLUSH_OPERATIONS-regionserver/51ca66f7ee3c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c43a6f7fd29b402ac6cd642b17dbc124/.tmp/info/0ad15d1bf8594161a8a37482906456b9
2024-11-11T20:46:46,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/51ca66f7ee3c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c43a6f7fd29b402ac6cd642b17dbc124/.tmp/info/0ad15d1bf8594161a8a37482906456b9 as hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c43a6f7fd29b402ac6cd642b17dbc124/info/0ad15d1bf8594161a8a37482906456b9
2024-11-11T20:46:46,648 INFO [RS_FLUSH_OPERATIONS-regionserver/51ca66f7ee3c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c43a6f7fd29b402ac6cd642b17dbc124/info/0ad15d1bf8594161a8a37482906456b9, entries=1, sequenceid=5, filesize=5.9 K
2024-11-11T20:46:46,649 INFO [RS_FLUSH_OPERATIONS-regionserver/51ca66f7ee3c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for c43a6f7fd29b402ac6cd642b17dbc124 in 34ms, sequenceid=5, compaction requested=false
2024-11-11T20:46:46,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/51ca66f7ee3c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for c43a6f7fd29b402ac6cd642b17dbc124:
2024-11-11T20:46:46,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/51ca66f7ee3c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731357996415.c43a6f7fd29b402ac6cd642b17dbc124.
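The flush entries above follow a write-then-commit pattern: the memstore is written to a temporary HFile under .tmp/info/ and, once complete, moved into the store's info/ directory and then registered with the store. The sketch below illustrates that general pattern with plain java.nio file operations; it is a simplified stand-in under stated assumptions, not HBase's HRegionFileSystem code, and the directory layout and file name are illustrative only.

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.StandardCopyOption;

    public class TmpThenCommit {
        // Write the flushed data to a temporary location first, then move it into the
        // live store directory, mirroring the ".tmp/info/<file>" -> "info/<file>"
        // commit step visible in the log above.
        static Path flushAndCommit(Path storeDir, String fileName, byte[] data) throws IOException {
            Path tmpDir = storeDir.resolve(".tmp");
            Files.createDirectories(tmpDir);
            Files.createDirectories(storeDir.resolve("info"));

            Path tmpFile = tmpDir.resolve(fileName);
            Files.write(tmpFile, data); // flush step: write the temporary file

            Path committed = storeDir.resolve("info").resolve(fileName);
            // commit step: rename into place so readers only ever see a complete file
            return Files.move(tmpFile, committed, StandardCopyOption.ATOMIC_MOVE);
        }

        public static void main(String[] args) throws IOException {
            Path store = Files.createTempDirectory("store");
            Path result = flushAndCommit(store, "example-hfile",
                    "row0001/info".getBytes(StandardCharsets.UTF_8));
            System.out.println("Committed " + result);
        }
    }

The design point the log illustrates is that readers of the store directory never observe a partially written HFile; only the rename makes the file visible.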
2024-11-11T20:46:46,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/51ca66f7ee3c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8
2024-11-11T20:46:46,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42863 {}] master.HMaster(4169): Remote procedure done, pid=8
2024-11-11T20:46:46,657 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7
2024-11-11T20:46:46,657 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 200 msec
2024-11-11T20:46:46,659 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 211 msec
[From 2024-11-11T20:46:46,887 through 20:46:55,901 the Close-WAL-Writer-0 lease-recovery WARN recurs at roughly one-second intervals, alternating between 51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 and 51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta, each time with the identical InvocationTargetException caused by java.io.IOException: Filesystem closed.]
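The recurring WARN above comes from a lease-recovery retry loop: RecoverLeaseFSUtils probes the WAL file roughly once per second by invoking DistributedFileSystem.isFileClosed through reflection, and every probe fails with the wrapped "Filesystem closed" IOException because the DFS client backing that filesystem has already been shut down. Below is a minimal sketch of that reflective probe-and-retry pattern; it is illustrative only, not the RecoverLeaseFSUtils implementation, and the WAL path and retry count are made-up values.

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class IsFileClosedProbe {
        // Reflectively ask the filesystem whether a file's lease is closed, retrying
        // once per second. DistributedFileSystem exposes isFileClosed(Path); the base
        // FileSystem class does not, hence the reflection seen in the stack traces above.
        static boolean waitUntilClosed(FileSystem fs, Path wal, int maxAttempts) throws InterruptedException {
            for (int attempt = 0; attempt < maxAttempts; attempt++) {
                try {
                    Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
                    if ((Boolean) isFileClosed.invoke(fs, wal)) {
                        return true;
                    }
                } catch (InvocationTargetException e) {
                    // The real cause (e.g. java.io.IOException: Filesystem closed) arrives wrapped
                    // here, which matches the "Failed invocation" WARN lines in the log.
                    System.err.println("Failed invocation for " + wal + ": " + e.getCause());
                } catch (ReflectiveOperationException e) {
                    return false; // this filesystem type does not support isFileClosed
                }
                Thread.sleep(1000L);
            }
            return false;
        }

        public static void main(String[] args) throws Exception {
            // Hypothetical local path; in the log the probed files live under hdfs://localhost:46503/user/jenkins/...
            FileSystem fs = FileSystem.get(new Configuration());
            System.out.println(waitUntilClosed(fs, new Path("/tmp/example-wal"), 3));
        }
    }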
2024-11-11T20:46:56,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42863 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-11T20:46:56,510 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-11T20:46:56,517 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42863 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-11T20:46:56,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42863 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-11T20:46:56,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42863 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-11T20:46:56,523 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-11T20:46:56,525 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-11T20:46:56,525 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-11T20:46:56,681 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34653 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10
2024-11-11T20:46:56,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/51ca66f7ee3c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731357996415.c43a6f7fd29b402ac6cd642b17dbc124.
2024-11-11T20:46:56,682 INFO [RS_FLUSH_OPERATIONS-regionserver/51ca66f7ee3c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing c43a6f7fd29b402ac6cd642b17dbc124 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-11T20:46:56,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/51ca66f7ee3c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c43a6f7fd29b402ac6cd642b17dbc124/.tmp/info/ffb7fbfdc79e492da6a4bc6ab241508a is 1080, key is row0002/info:/1731358016513/Put/seqid=0 2024-11-11T20:46:56,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41687 is added to blk_1073741838_1014 (size=6033) 2024-11-11T20:46:56,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43291 is added to blk_1073741838_1014 (size=6033) 2024-11-11T20:46:56,707 INFO [RS_FLUSH_OPERATIONS-regionserver/51ca66f7ee3c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c43a6f7fd29b402ac6cd642b17dbc124/.tmp/info/ffb7fbfdc79e492da6a4bc6ab241508a 2024-11-11T20:46:56,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/51ca66f7ee3c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c43a6f7fd29b402ac6cd642b17dbc124/.tmp/info/ffb7fbfdc79e492da6a4bc6ab241508a as hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c43a6f7fd29b402ac6cd642b17dbc124/info/ffb7fbfdc79e492da6a4bc6ab241508a 2024-11-11T20:46:56,722 INFO [RS_FLUSH_OPERATIONS-regionserver/51ca66f7ee3c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c43a6f7fd29b402ac6cd642b17dbc124/info/ffb7fbfdc79e492da6a4bc6ab241508a, entries=1, sequenceid=9, filesize=5.9 K 2024-11-11T20:46:56,723 INFO [RS_FLUSH_OPERATIONS-regionserver/51ca66f7ee3c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for c43a6f7fd29b402ac6cd642b17dbc124 in 41ms, sequenceid=9, compaction requested=false 2024-11-11T20:46:56,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/51ca66f7ee3c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for c43a6f7fd29b402ac6cd642b17dbc124: 2024-11-11T20:46:56,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/51ca66f7ee3c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731357996415.c43a6f7fd29b402ac6cd642b17dbc124. 
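[Editor's note] The flush above writes the memstore (~1.05 KB) to a temporary HFile under .tmp/, commits it into the info/ family directory, and records it at sequenceid=9 with "compaction requested=false". As an illustrative check only (the HDFS URI and directory paths are copied from the log, the class name ListStoreFiles and the standalone setup are assumptions), the committed store files could be listed with the plain Hadoop FileSystem API:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListStoreFiles {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // NameNode address and region path taken from the log entries above.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:39195"), conf);
    Path family = new Path("/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/"
        + "data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/"
        + "c43a6f7fd29b402ac6cd642b17dbc124/info");
    // Each flush adds one HFile here; after enough flushes a compaction is requested.
    for (FileStatus f : fs.listStatus(family)) {
      System.out.println(f.getPath().getName() + " " + f.getLen() + " bytes");
    }
  }
}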
2024-11-11T20:46:56,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/51ca66f7ee3c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-11-11T20:46:56,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42863 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-11-11T20:46:56,728 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-11T20:46:56,729 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 201 msec 2024-11-11T20:46:56,731 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 212 msec 2024-11-11T20:46:56,897 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T20:46:56,901 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:57,898 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:57,902 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T20:46:58,899 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:58,903 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:46:59,900 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T20:46:59,903 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:47:00,901 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:47:00,904 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T20:47:01,902 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:47:01,905 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:47:02,903 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T20:47:02,904 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 after 68070ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor206.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T20:47:02,906 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:47:02,906 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta after 68059ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor206.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
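[Editor's note] The recurring WARN entries from util.RecoverLeaseFSUtils show a Close-WAL-Writer worker retrying lease recovery roughly once per second against a DFSClient that has already been shut down, so every recoverLease()/isFileClosed() call fails immediately with "Filesystem closed" until the attempt timer gives up (attempt=2 after ~68s). The following is a rough sketch of that general retry pattern, not HBase's actual RecoverLeaseFSUtils implementation: the method names recoverLease and isFileClosed appear in the stack traces above, while the loop structure, timeout/pause parameters, and class name LeaseRecoverySketch are assumptions for illustration.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoverySketch {
  // Keep asking the NameNode to recover the WAL lease, polling isFileClosed()
  // between attempts, until success or timeout. If the underlying DFSClient has
  // been closed, every call throws "Filesystem closed", which is exactly the
  // repeated warning seen in the log above.
  public static boolean recoverWithRetries(DistributedFileSystem dfs, Path wal,
      long timeoutMs, long pauseMs) throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      try {
        if (dfs.recoverLease(wal) || dfs.isFileClosed(wal)) {
          return true; // lease recovered, or the file is already closed on the NameNode
        }
      } catch (java.io.IOException e) {
        // e.g. "Filesystem closed" when the client was shut down underneath us; retry until deadline
      }
      Thread.sleep(pauseMs);
    }
    return false;
  }
}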
2024-11-11T20:47:03,904 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:47:03,907 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:47:04,905 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T20:47:04,907 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:47:05,518 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-11T20:47:05,906 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-11T20:47:05,908 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-11T20:47:06,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42863 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-11T20:47:06,530 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-11T20:47:06,537 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 51ca66f7ee3c%2C34653%2C1731357995580.1731358026536
2024-11-11T20:47:06,546 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-11T20:47:06,547 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-11T20:47:06,547 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-11T20:47:06,547 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-11T20:47:06,547 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-11T20:47:06,548 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/WALs/51ca66f7ee3c,34653,1731357995580/51ca66f7ee3c%2C34653%2C1731357995580.1731357995965 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/WALs/51ca66f7ee3c,34653,1731357995580/51ca66f7ee3c%2C34653%2C1731357995580.1731358026536
2024-11-11T20:47:06,549 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40017:40017),(127.0.0.1/127.0.0.1:37095:37095)]
2024-11-11T20:47:06,549 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/WALs/51ca66f7ee3c,34653,1731357995580/51ca66f7ee3c%2C34653%2C1731357995580.1731357995965 is not closed yet, will try archiving it next time
2024-11-11T20:47:06,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41687 is added to blk_1073741833_1009 (size=5546)
2024-11-11T20:47:06,551 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42863 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-11T20:47:06,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43291 is added to blk_1073741833_1009 (size=5546)
2024-11-11T20:47:06,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42863 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-11T20:47:06,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42863 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-11T20:47:06,554 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-11T20:47:06,555 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-11T20:47:06,555 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-11T20:47:06,711 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34653 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12
2024-11-11T20:47:06,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/51ca66f7ee3c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731357996415.c43a6f7fd29b402ac6cd642b17dbc124.
2024-11-11T20:47:06,712 INFO [RS_FLUSH_OPERATIONS-regionserver/51ca66f7ee3c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing c43a6f7fd29b402ac6cd642b17dbc124 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-11T20:47:06,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/51ca66f7ee3c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c43a6f7fd29b402ac6cd642b17dbc124/.tmp/info/ccbf126ecd8d4891bb237f0a15a5af4e is 1080, key is row0003/info:/1731358026533/Put/seqid=0
2024-11-11T20:47:06,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41687 is added to blk_1073741840_1016 (size=6033)
2024-11-11T20:47:06,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43291 is added to blk_1073741840_1016 (size=6033)
2024-11-11T20:47:06,726 INFO [RS_FLUSH_OPERATIONS-regionserver/51ca66f7ee3c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c43a6f7fd29b402ac6cd642b17dbc124/.tmp/info/ccbf126ecd8d4891bb237f0a15a5af4e
2024-11-11T20:47:06,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/51ca66f7ee3c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c43a6f7fd29b402ac6cd642b17dbc124/.tmp/info/ccbf126ecd8d4891bb237f0a15a5af4e as hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c43a6f7fd29b402ac6cd642b17dbc124/info/ccbf126ecd8d4891bb237f0a15a5af4e
2024-11-11T20:47:06,738 INFO [RS_FLUSH_OPERATIONS-regionserver/51ca66f7ee3c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c43a6f7fd29b402ac6cd642b17dbc124/info/ccbf126ecd8d4891bb237f0a15a5af4e, entries=1, sequenceid=13, filesize=5.9 K
2024-11-11T20:47:06,739 INFO [RS_FLUSH_OPERATIONS-regionserver/51ca66f7ee3c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for c43a6f7fd29b402ac6cd642b17dbc124 in 27ms, sequenceid=13, compaction requested=true
2024-11-11T20:47:06,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/51ca66f7ee3c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for c43a6f7fd29b402ac6cd642b17dbc124:
2024-11-11T20:47:06,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/51ca66f7ee3c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731357996415.c43a6f7fd29b402ac6cd642b17dbc124.
2024-11-11T20:47:06,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/51ca66f7ee3c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12
2024-11-11T20:47:06,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42863 {}] master.HMaster(4169): Remote procedure done, pid=12
2024-11-11T20:47:06,744 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11
2024-11-11T20:47:06,744 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 186 msec
2024-11-11T20:47:06,746 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 194 msec
2024-11-11T20:47:06,906 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-11T20:47:16,461 INFO [master/51ca66f7ee3c:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-11-11T20:47:16,461 INFO [master/51ca66f7ee3c:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-11-11T20:47:16,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42863 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-11T20:47:16,609 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-11T20:47:16,609 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-11T20:47:16,610 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-11T20:47:16,610 DEBUG [Time-limited test {}] regionserver.HStore(1541): c43a6f7fd29b402ac6cd642b17dbc124/info is initiating minor compaction (all files)
2024-11-11T20:47:16,610 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-11T20:47:16,610 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-11-11T20:47:16,611 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of c43a6f7fd29b402ac6cd642b17dbc124/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731357996415.c43a6f7fd29b402ac6cd642b17dbc124.
2024-11-11T20:47:16,611 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c43a6f7fd29b402ac6cd642b17dbc124/info/0ad15d1bf8594161a8a37482906456b9, hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c43a6f7fd29b402ac6cd642b17dbc124/info/ffb7fbfdc79e492da6a4bc6ab241508a, hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c43a6f7fd29b402ac6cd642b17dbc124/info/ccbf126ecd8d4891bb237f0a15a5af4e] into tmpdir=hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c43a6f7fd29b402ac6cd642b17dbc124/.tmp, totalSize=17.7 K
2024-11-11T20:47:16,611 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 0ad15d1bf8594161a8a37482906456b9, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1731358006442
2024-11-11T20:47:16,611 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting ffb7fbfdc79e492da6a4bc6ab241508a, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1731358016513
2024-11-11T20:47:16,612 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting ccbf126ecd8d4891bb237f0a15a5af4e, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1731358026533
2024-11-11T20:47:16,624 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): c43a6f7fd29b402ac6cd642b17dbc124#info#compaction#44 average throughput is 3.08 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-11T20:47:16,624 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c43a6f7fd29b402ac6cd642b17dbc124/.tmp/info/76c7229eedca4a1eb05b60468c531f2a is 1080, key is row0001/info:/1731358006442/Put/seqid=0
2024-11-11T20:47:16,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41687 is added to blk_1073741841_1017 (size=8296)
2024-11-11T20:47:16,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43291 is added to blk_1073741841_1017 (size=8296)
2024-11-11T20:47:16,637 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c43a6f7fd29b402ac6cd642b17dbc124/.tmp/info/76c7229eedca4a1eb05b60468c531f2a as hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c43a6f7fd29b402ac6cd642b17dbc124/info/76c7229eedca4a1eb05b60468c531f2a
2024-11-11T20:47:16,644 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c43a6f7fd29b402ac6cd642b17dbc124/info of c43a6f7fd29b402ac6cd642b17dbc124 into 76c7229eedca4a1eb05b60468c531f2a(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-11T20:47:16,644 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for c43a6f7fd29b402ac6cd642b17dbc124:
2024-11-11T20:47:16,647 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 51ca66f7ee3c%2C34653%2C1731357995580.1731358036647
2024-11-11T20:47:16,652 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-11T20:47:16,653 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-11T20:47:16,653 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-11T20:47:16,653 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-11T20:47:16,653 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-11T20:47:16,653 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/WALs/51ca66f7ee3c,34653,1731357995580/51ca66f7ee3c%2C34653%2C1731357995580.1731358026536 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/WALs/51ca66f7ee3c,34653,1731357995580/51ca66f7ee3c%2C34653%2C1731357995580.1731358036647
2024-11-11T20:47:16,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43291 is added to blk_1073741839_1015 (size=2520)
2024-11-11T20:47:16,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41687 is added to blk_1073741839_1015 (size=2520)
2024-11-11T20:47:16,657 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/WALs/51ca66f7ee3c,34653,1731357995580/51ca66f7ee3c%2C34653%2C1731357995580.1731357995965 to hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/oldWALs/51ca66f7ee3c%2C34653%2C1731357995580.1731357995965
2024-11-11T20:47:16,660 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37095:37095),(127.0.0.1/127.0.0.1:40017:40017)]
2024-11-11T20:47:16,661 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42863 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-11T20:47:16,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42863 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-11T20:47:16,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42863 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-11-11T20:47:16,663 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-11T20:47:16,664 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-11T20:47:16,665 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-11T20:47:16,818 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34653 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14
2024-11-11T20:47:16,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/51ca66f7ee3c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731357996415.c43a6f7fd29b402ac6cd642b17dbc124.
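The records above cover one round of the test's write/flush/compact cycle: the client's FLUSH operation completes, ExploringCompactionPolicy selects all three ~5.9 K store files, the 8.1 K compacted file is committed, the WAL is rolled, and the master stores FlushTableProcedure pid=13, which fans out FlushRegionProcedure pid=14 to the region server. A minimal client-side sketch of driving the same cycle with the public HBase Admin API is below; the table and family names come from the log, while the row key, qualifier, and value are illustrative, and this is not the actual test code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushAndCompactSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table =
        TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table t = conn.getTable(table);
         Admin admin = conn.getAdmin()) {
      // Write one cell into the 'info' family (the log's rows are row0000/row0001).
      Put put = new Put(Bytes.toBytes("row0001"));
      put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("v"));
      t.put(put);

      // Ask the master to flush the table; it runs as a FlushTableProcedure
      // (pid=13 above) that fans out a FlushRegionProcedure per region.
      admin.flush(table);

      // Request a compaction of the flushed store files; the region server's
      // compaction policy (ExploringCompactionPolicy above) picks the files.
      admin.majorCompact(table);
    }
  }
}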
2024-11-11T20:47:16,818 INFO [RS_FLUSH_OPERATIONS-regionserver/51ca66f7ee3c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing c43a6f7fd29b402ac6cd642b17dbc124 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-11T20:47:16,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/51ca66f7ee3c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c43a6f7fd29b402ac6cd642b17dbc124/.tmp/info/e368fb7240934772b104fa86bc74eba2 is 1080, key is row0000/info:/1731358036645/Put/seqid=0
2024-11-11T20:47:16,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41687 is added to blk_1073741843_1019 (size=6033)
2024-11-11T20:47:16,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43291 is added to blk_1073741843_1019 (size=6033)
2024-11-11T20:47:16,829 INFO [RS_FLUSH_OPERATIONS-regionserver/51ca66f7ee3c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c43a6f7fd29b402ac6cd642b17dbc124/.tmp/info/e368fb7240934772b104fa86bc74eba2
2024-11-11T20:47:16,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/51ca66f7ee3c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c43a6f7fd29b402ac6cd642b17dbc124/.tmp/info/e368fb7240934772b104fa86bc74eba2 as hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c43a6f7fd29b402ac6cd642b17dbc124/info/e368fb7240934772b104fa86bc74eba2
2024-11-11T20:47:16,842 INFO [RS_FLUSH_OPERATIONS-regionserver/51ca66f7ee3c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c43a6f7fd29b402ac6cd642b17dbc124/info/e368fb7240934772b104fa86bc74eba2, entries=1, sequenceid=18, filesize=5.9 K
2024-11-11T20:47:16,843 INFO [RS_FLUSH_OPERATIONS-regionserver/51ca66f7ee3c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for c43a6f7fd29b402ac6cd642b17dbc124 in 25ms, sequenceid=18, compaction requested=false
2024-11-11T20:47:16,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/51ca66f7ee3c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for c43a6f7fd29b402ac6cd642b17dbc124:
2024-11-11T20:47:16,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/51ca66f7ee3c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731357996415.c43a6f7fd29b402ac6cd642b17dbc124.
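The throughput record earlier in this cycle (higher bound 100.00 MB/second, lower bound 50.00 MB/second, off peak unlimited, tuning period 60000 ms) comes from the pressure-aware compaction throttle that paces the compaction just completed. Below is a hedged sketch of setting those same bounds programmatically; the configuration key names are my assumptions based on PressureAwareCompactionThroughputController and should be verified against the HBase version in use.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputConfigSketch {
  /** Mirrors the bounds reported by PressureAwareCompactionThroughputController above. */
  public static Configuration tunedConf() {
    Configuration conf = HBaseConfiguration.create();
    // Assumed key names -- verify against the HBase version in use.
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024); // 100 MB/s
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);   // 50 MB/s
    conf.setInt("hbase.hstore.compaction.throughput.tune.period", 60_000);               // 60000 ms
    return conf;
  }
}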
2024-11-11T20:47:16,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/51ca66f7ee3c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-11T20:47:16,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42863 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-11T20:47:16,847 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-11T20:47:16,848 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 181 msec 2024-11-11T20:47:16,849 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 187 msec 2024-11-11T20:47:16,913 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T20:47:16,914 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:47:17,914 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:47:17,914 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T20:47:18,915 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:47:18,915 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:47:19,915 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T20:47:19,915 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:47:20,916 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:47:20,916 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T20:47:21,767 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region c43a6f7fd29b402ac6cd642b17dbc124, had cached 0 bytes from a total of 14329 2024-11-11T20:47:21,918 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:47:21,918 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:47:22,919 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:47:22,919 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:47:23,921 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:47:23,921 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:47:24,921 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:47:24,921 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:47:25,922 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:47:25,922 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-11T20:47:26,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42863 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-11-11T20:47:26,729 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-11T20:47:26,735 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 51ca66f7ee3c%2C34653%2C1731357995580.1731358046734
2024-11-11T20:47:26,744 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-11T20:47:26,744 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-11T20:47:26,744 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-11T20:47:26,744 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-11T20:47:26,744 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-11T20:47:26,744 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/WALs/51ca66f7ee3c,34653,1731357995580/51ca66f7ee3c%2C34653%2C1731357995580.1731358036647 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/WALs/51ca66f7ee3c,34653,1731357995580/51ca66f7ee3c%2C34653%2C1731357995580.1731358046734
2024-11-11T20:47:26,745 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37095:37095),(127.0.0.1/127.0.0.1:40017:40017)]
2024-11-11T20:47:26,745 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/WALs/51ca66f7ee3c,34653,1731357995580/51ca66f7ee3c%2C34653%2C1731357995580.1731358036647 is not closed yet, will try archiving it next time
2024-11-11T20:47:26,745 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster
2024-11-11T20:47:26,745 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/WALs/51ca66f7ee3c,34653,1731357995580/51ca66f7ee3c%2C34653%2C1731357995580.1731358026536 to hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/oldWALs/51ca66f7ee3c%2C34653%2C1731357995580.1731358026536
2024-11-11T20:47:26,745 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
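The long run of WARN records above is a single Close-WAL-Writer-0 thread retrying lease recovery roughly once per second against two WAL files from an earlier mini-DFS instance; every probe fails with java.io.IOException: Filesystem closed because that DFSClient has already been shut down. The sketch below is a simplified, hypothetical version of the same recover-then-poll loop, using only the DistributedFileSystem calls that appear in the stack traces (recoverLease, isFileClosed); the timeout and sleep values are illustrative and not HBase's own.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoveryRetrySketch {
  /** Simplified recover-then-poll loop; returns true once the file is closed. */
  public static boolean recoverLease(Configuration conf, Path walFile, long timeoutMs)
      throws IOException, InterruptedException {
    FileSystem fs = walFile.getFileSystem(conf);
    if (!(fs instanceof DistributedFileSystem)) {
      return true; // only HDFS has leases to recover
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      try {
        // recoverLease returns true once the NameNode considers the file closed;
        // isFileClosed is the probe that the stack traces above are failing on.
        if (dfs.recoverLease(walFile) || dfs.isFileClosed(walFile)) {
          return true;
        }
      } catch (IOException e) {
        // "java.io.IOException: Filesystem closed" lands here when the DFSClient
        // has been shut down underneath the retry loop; the production code
        // (RecoverLeaseFSUtils) logs the warning seen above and keeps retrying.
      }
      Thread.sleep(1000); // roughly the one-second cadence visible in the log
    }
    return false;
  }
}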
2024-11-11T20:47:26,745 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T20:47:26,746 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T20:47:26,746 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T20:47:26,746 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-11T20:47:26,746 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1136441466, stopped=false 2024-11-11T20:47:26,746 
INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-11T20:47:26,746 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=51ca66f7ee3c,42863,1731357995536 2024-11-11T20:47:26,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41687 is added to blk_1073741842_1018 (size=2026) 2024-11-11T20:47:26,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43291 is added to blk_1073741842_1018 (size=2026) 2024-11-11T20:47:26,748 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34653-0x1003089ce700001, quorum=127.0.0.1:58490, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T20:47:26,748 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42863-0x1003089ce700000, quorum=127.0.0.1:58490, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T20:47:26,748 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42863-0x1003089ce700000, quorum=127.0.0.1:58490, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:47:26,748 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34653-0x1003089ce700001, quorum=127.0.0.1:58490, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:47:26,748 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T20:47:26,748 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
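The call stacks above show the teardown path for this run: AbstractTestLogRolling.tearDown() drives HBaseTestingUtil.shutdownMiniCluster(), which closes the shared async connection (AsyncConnectionImpl.close(), via Guava's Closeables.close) before stopping the mini HBase cluster. A minimal sketch of that lifecycle, assuming a JUnit 4 test as in the runner frames above; TEST_UTIL and the class name are illustrative, not taken from the log:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class MiniClusterLifecycleSketch {
  // Utility backing the mini DFS + ZooKeeper + HBase cluster used by the test.
  private final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // Starts a single master and a single region server, as in this log.
    TEST_UTIL.startMiniCluster();
  }

  @Test
  public void testSomething() throws Exception {
    // test body elided
  }

  @After
  public void tearDown() throws Exception {
    // Closes the shared connection and shuts the cluster down; this is the
    // call that produces the "Connection has been closed" entries above.
    TEST_UTIL.shutdownMiniCluster();
  }
}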
2024-11-11T20:47:26,749 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T20:47:26,749 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T20:47:26,749 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '51ca66f7ee3c,34653,1731357995580' ***** 2024-11-11T20:47:26,749 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-11T20:47:26,749 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42863-0x1003089ce700000, quorum=127.0.0.1:58490, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T20:47:26,749 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34653-0x1003089ce700001, quorum=127.0.0.1:58490, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T20:47:26,749 INFO [RS:0;51ca66f7ee3c:34653 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T20:47:26,749 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-11T20:47:26,750 INFO [RS:0;51ca66f7ee3c:34653 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-11T20:47:26,750 INFO [RS:0;51ca66f7ee3c:34653 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-11T20:47:26,750 INFO [RS:0;51ca66f7ee3c:34653 {}] regionserver.HRegionServer(3091): Received CLOSE for c43a6f7fd29b402ac6cd642b17dbc124 2024-11-11T20:47:26,750 INFO [RS:0;51ca66f7ee3c:34653 {}] regionserver.HRegionServer(959): stopping server 51ca66f7ee3c,34653,1731357995580 2024-11-11T20:47:26,750 INFO [RS:0;51ca66f7ee3c:34653 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T20:47:26,750 INFO [RS:0;51ca66f7ee3c:34653 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;51ca66f7ee3c:34653. 2024-11-11T20:47:26,750 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing c43a6f7fd29b402ac6cd642b17dbc124, disabling compactions & flushes 2024-11-11T20:47:26,750 DEBUG [RS:0;51ca66f7ee3c:34653 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T20:47:26,750 INFO [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region 
TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731357996415.c43a6f7fd29b402ac6cd642b17dbc124. 2024-11-11T20:47:26,750 DEBUG [RS:0;51ca66f7ee3c:34653 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T20:47:26,750 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731357996415.c43a6f7fd29b402ac6cd642b17dbc124. 2024-11-11T20:47:26,751 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731357996415.c43a6f7fd29b402ac6cd642b17dbc124. after waiting 0 ms 2024-11-11T20:47:26,751 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731357996415.c43a6f7fd29b402ac6cd642b17dbc124. 2024-11-11T20:47:26,751 INFO [RS:0;51ca66f7ee3c:34653 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-11T20:47:26,751 INFO [RS:0;51ca66f7ee3c:34653 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-11T20:47:26,751 INFO [RS:0;51ca66f7ee3c:34653 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-11T20:47:26,751 INFO [RS:0;51ca66f7ee3c:34653 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-11T20:47:26,751 INFO [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing c43a6f7fd29b402ac6cd642b17dbc124 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-11T20:47:26,751 INFO [RS:0;51ca66f7ee3c:34653 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-11T20:47:26,751 DEBUG [RS:0;51ca66f7ee3c:34653 {}] regionserver.HRegionServer(1325): Online Regions={c43a6f7fd29b402ac6cd642b17dbc124=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731357996415.c43a6f7fd29b402ac6cd642b17dbc124., 1588230740=hbase:meta,,1.1588230740} 2024-11-11T20:47:26,751 DEBUG [RS:0;51ca66f7ee3c:34653 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, c43a6f7fd29b402ac6cd642b17dbc124 2024-11-11T20:47:26,751 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T20:47:26,751 INFO [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T20:47:26,751 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T20:47:26,751 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T20:47:26,751 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T20:47:26,751 INFO [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-11T20:47:26,756 DEBUG 
[RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c43a6f7fd29b402ac6cd642b17dbc124/.tmp/info/0095c05dadbe462a8ee8d38a381031c0 is 1080, key is row0001/info:/1731358046731/Put/seqid=0 2024-11-11T20:47:26,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41687 is added to blk_1073741845_1021 (size=6033) 2024-11-11T20:47:26,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43291 is added to blk_1073741845_1021 (size=6033) 2024-11-11T20:47:26,761 INFO [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c43a6f7fd29b402ac6cd642b17dbc124/.tmp/info/0095c05dadbe462a8ee8d38a381031c0 2024-11-11T20:47:26,767 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c43a6f7fd29b402ac6cd642b17dbc124/.tmp/info/0095c05dadbe462a8ee8d38a381031c0 as hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c43a6f7fd29b402ac6cd642b17dbc124/info/0095c05dadbe462a8ee8d38a381031c0 2024-11-11T20:47:26,769 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/hbase/meta/1588230740/.tmp/info/672d177a094647deb0c4ae454a94f4ce is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731357996415.c43a6f7fd29b402ac6cd642b17dbc124./info:regioninfo/1731357996781/Put/seqid=0 2024-11-11T20:47:26,773 INFO [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c43a6f7fd29b402ac6cd642b17dbc124/info/0095c05dadbe462a8ee8d38a381031c0, entries=1, sequenceid=22, filesize=5.9 K 2024-11-11T20:47:26,774 INFO [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for c43a6f7fd29b402ac6cd642b17dbc124 in 22ms, sequenceid=22, compaction requested=true 2024-11-11T20:47:26,774 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731357996415.c43a6f7fd29b402ac6cd642b17dbc124.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c43a6f7fd29b402ac6cd642b17dbc124/info/0ad15d1bf8594161a8a37482906456b9, 
hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c43a6f7fd29b402ac6cd642b17dbc124/info/ffb7fbfdc79e492da6a4bc6ab241508a, hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c43a6f7fd29b402ac6cd642b17dbc124/info/ccbf126ecd8d4891bb237f0a15a5af4e] to archive 2024-11-11T20:47:26,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43291 is added to blk_1073741846_1022 (size=7308) 2024-11-11T20:47:26,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41687 is added to blk_1073741846_1022 (size=7308) 2024-11-11T20:47:26,775 INFO [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/hbase/meta/1588230740/.tmp/info/672d177a094647deb0c4ae454a94f4ce 2024-11-11T20:47:26,775 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731357996415.c43a6f7fd29b402ac6cd642b17dbc124.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-11T20:47:26,776 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731357996415.c43a6f7fd29b402ac6cd642b17dbc124.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c43a6f7fd29b402ac6cd642b17dbc124/info/0ad15d1bf8594161a8a37482906456b9 to hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c43a6f7fd29b402ac6cd642b17dbc124/info/0ad15d1bf8594161a8a37482906456b9 2024-11-11T20:47:26,777 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731357996415.c43a6f7fd29b402ac6cd642b17dbc124.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c43a6f7fd29b402ac6cd642b17dbc124/info/ffb7fbfdc79e492da6a4bc6ab241508a to hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c43a6f7fd29b402ac6cd642b17dbc124/info/ffb7fbfdc79e492da6a4bc6ab241508a 2024-11-11T20:47:26,779 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731357996415.c43a6f7fd29b402ac6cd642b17dbc124.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c43a6f7fd29b402ac6cd642b17dbc124/info/ccbf126ecd8d4891bb237f0a15a5af4e to hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c43a6f7fd29b402ac6cd642b17dbc124/info/ccbf126ecd8d4891bb237f0a15a5af4e 2024-11-11T20:47:26,779 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731357996415.c43a6f7fd29b402ac6cd642b17dbc124.-1 {}] 
regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=51ca66f7ee3c:42863 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-11-11T20:47:26,779 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731357996415.c43a6f7fd29b402ac6cd642b17dbc124.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [0ad15d1bf8594161a8a37482906456b9=6033, ffb7fbfdc79e492da6a4bc6ab241508a=6033, ccbf126ecd8d4891bb237f0a15a5af4e=6033] 2024-11-11T20:47:26,783 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c43a6f7fd29b402ac6cd642b17dbc124/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-11T20:47:26,784 INFO [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731357996415.c43a6f7fd29b402ac6cd642b17dbc124. 2024-11-11T20:47:26,784 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for c43a6f7fd29b402ac6cd642b17dbc124: Waiting for close lock at 1731358046750Running coprocessor pre-close hooks at 1731358046750Disabling compacts and flushes for region at 1731358046750Disabling writes for close at 1731358046751 (+1 ms)Obtaining lock to block concurrent updates at 1731358046751Preparing flush snapshotting stores in c43a6f7fd29b402ac6cd642b17dbc124 at 1731358046751Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731357996415.c43a6f7fd29b402ac6cd642b17dbc124., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1731358046751Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731357996415.c43a6f7fd29b402ac6cd642b17dbc124. at 1731358046752 (+1 ms)Flushing c43a6f7fd29b402ac6cd642b17dbc124/info: creating writer at 1731358046752Flushing c43a6f7fd29b402ac6cd642b17dbc124/info: appending metadata at 1731358046755 (+3 ms)Flushing c43a6f7fd29b402ac6cd642b17dbc124/info: closing flushed file at 1731358046755Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@42a58628: reopening flushed file at 1731358046766 (+11 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for c43a6f7fd29b402ac6cd642b17dbc124 in 22ms, sequenceid=22, compaction requested=true at 1731358046774 (+8 ms)Writing region close event to WAL at 1731358046780 (+6 ms)Running coprocessor post-close hooks at 1731358046783 (+3 ms)Closed at 1731358046784 (+1 ms) 2024-11-11T20:47:26,784 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731357996415.c43a6f7fd29b402ac6cd642b17dbc124. 
2024-11-11T20:47:26,795 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/hbase/meta/1588230740/.tmp/ns/da69d74a7526471fbce1375495fe2c69 is 43, key is default/ns:d/1731357996381/Put/seqid=0 2024-11-11T20:47:26,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43291 is added to blk_1073741847_1023 (size=5153) 2024-11-11T20:47:26,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41687 is added to blk_1073741847_1023 (size=5153) 2024-11-11T20:47:26,800 INFO [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/hbase/meta/1588230740/.tmp/ns/da69d74a7526471fbce1375495fe2c69 2024-11-11T20:47:26,818 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/hbase/meta/1588230740/.tmp/table/53def2139df94a6dbd48a0466f64ca16 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1731357996793/Put/seqid=0 2024-11-11T20:47:26,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43291 is added to blk_1073741848_1024 (size=5508) 2024-11-11T20:47:26,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41687 is added to blk_1073741848_1024 (size=5508) 2024-11-11T20:47:26,823 INFO [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/hbase/meta/1588230740/.tmp/table/53def2139df94a6dbd48a0466f64ca16 2024-11-11T20:47:26,829 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/hbase/meta/1588230740/.tmp/info/672d177a094647deb0c4ae454a94f4ce as hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/hbase/meta/1588230740/info/672d177a094647deb0c4ae454a94f4ce 2024-11-11T20:47:26,830 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-11T20:47:26,833 INFO [regionserver/51ca66f7ee3c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-11T20:47:26,833 INFO [regionserver/51ca66f7ee3c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-11T20:47:26,835 INFO [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/hbase/meta/1588230740/info/672d177a094647deb0c4ae454a94f4ce, entries=10, sequenceid=11, filesize=7.1 K 
2024-11-11T20:47:26,836 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/hbase/meta/1588230740/.tmp/ns/da69d74a7526471fbce1375495fe2c69 as hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/hbase/meta/1588230740/ns/da69d74a7526471fbce1375495fe2c69 2024-11-11T20:47:26,841 INFO [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/hbase/meta/1588230740/ns/da69d74a7526471fbce1375495fe2c69, entries=2, sequenceid=11, filesize=5.0 K 2024-11-11T20:47:26,842 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/hbase/meta/1588230740/.tmp/table/53def2139df94a6dbd48a0466f64ca16 as hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/hbase/meta/1588230740/table/53def2139df94a6dbd48a0466f64ca16 2024-11-11T20:47:26,847 INFO [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/hbase/meta/1588230740/table/53def2139df94a6dbd48a0466f64ca16, entries=2, sequenceid=11, filesize=5.4 K 2024-11-11T20:47:26,848 INFO [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 97ms, sequenceid=11, compaction requested=false 2024-11-11T20:47:26,853 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-11T20:47:26,853 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T20:47:26,854 INFO [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T20:47:26,854 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731358046751Running coprocessor pre-close hooks at 1731358046751Disabling compacts and flushes for region at 1731358046751Disabling writes for close at 1731358046751Obtaining lock to block concurrent updates at 1731358046751Preparing flush snapshotting stores in 1588230740 at 1731358046751Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1731358046752 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731358046753 (+1 ms)Flushing 1588230740/info: creating writer at 1731358046753Flushing 1588230740/info: appending metadata at 1731358046769 (+16 ms)Flushing 1588230740/info: closing flushed file at 1731358046769Flushing 1588230740/ns: creating writer at 
1731358046780 (+11 ms)Flushing 1588230740/ns: appending metadata at 1731358046795 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1731358046795Flushing 1588230740/table: creating writer at 1731358046805 (+10 ms)Flushing 1588230740/table: appending metadata at 1731358046818 (+13 ms)Flushing 1588230740/table: closing flushed file at 1731358046818Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4d44a091: reopening flushed file at 1731358046829 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@500cccb2: reopening flushed file at 1731358046835 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6436bcc7: reopening flushed file at 1731358046841 (+6 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 97ms, sequenceid=11, compaction requested=false at 1731358046848 (+7 ms)Writing region close event to WAL at 1731358046849 (+1 ms)Running coprocessor post-close hooks at 1731358046853 (+4 ms)Closed at 1731358046853 2024-11-11T20:47:26,854 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-11T20:47:26,923 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T20:47:26,923 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:47:26,951 INFO [RS:0;51ca66f7ee3c:34653 {}] regionserver.HRegionServer(976): stopping server 51ca66f7ee3c,34653,1731357995580; all regions closed. 
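The two WARNs above ("Failed invocation ... Caused by: java.io.IOException: Filesystem closed") come from WAL lease recovery running after the mini DFS client has already been shut down: once a Hadoop FileSystem instance is closed, DFSClient.checkOpen rejects every later call. A small illustration of that failure mode, assuming fs.defaultFS points at an HDFS NameNode (as with the mini DFS cluster here); the path is illustrative only:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FilesystemClosedSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);   // DistributedFileSystem when fs.defaultFS is hdfs://...
    fs.close();                             // e.g. a shutdown hook closed the shared instance first
    try {
      // Any later call (isFileClosed() in the log, exists() here) hits DFSClient.checkOpen.
      fs.exists(new Path("/user/jenkins/example"));
    } catch (IOException e) {
      System.out.println(e.getMessage());   // "Filesystem closed"
    }
  }
}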
2024-11-11T20:47:26,952 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:47:26,952 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:47:26,952 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:47:26,952 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:47:26,952 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:47:26,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43291 is added to blk_1073741834_1010 (size=3306) 2024-11-11T20:47:26,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41687 is added to blk_1073741834_1010 (size=3306) 2024-11-11T20:47:26,958 DEBUG [RS:0;51ca66f7ee3c:34653 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/oldWALs 2024-11-11T20:47:26,958 INFO [RS:0;51ca66f7ee3c:34653 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 51ca66f7ee3c%2C34653%2C1731357995580.meta:.meta(num 1731357996340) 2024-11-11T20:47:26,958 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:47:26,959 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:47:26,959 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:47:26,959 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:47:26,959 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:47:26,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41687 is added to blk_1073741844_1020 (size=1252) 2024-11-11T20:47:26,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43291 is added to blk_1073741844_1020 (size=1252) 2024-11-11T20:47:26,966 DEBUG [RS:0;51ca66f7ee3c:34653 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/oldWALs 2024-11-11T20:47:26,966 INFO [RS:0;51ca66f7ee3c:34653 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 51ca66f7ee3c%2C34653%2C1731357995580:(num 1731358046734) 2024-11-11T20:47:26,966 DEBUG [RS:0;51ca66f7ee3c:34653 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T20:47:26,966 INFO [RS:0;51ca66f7ee3c:34653 {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T20:47:26,966 INFO [RS:0;51ca66f7ee3c:34653 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T20:47:26,967 INFO [RS:0;51ca66f7ee3c:34653 {}] hbase.ChoreService(370): Chore service for: regionserver/51ca66f7ee3c:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-11T20:47:26,967 INFO [RS:0;51ca66f7ee3c:34653 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T20:47:26,967 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-11T20:47:26,967 INFO [RS:0;51ca66f7ee3c:34653 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34653 2024-11-11T20:47:26,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34653-0x1003089ce700001, quorum=127.0.0.1:58490, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/51ca66f7ee3c,34653,1731357995580 2024-11-11T20:47:26,969 INFO [RS:0;51ca66f7ee3c:34653 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T20:47:26,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42863-0x1003089ce700000, quorum=127.0.0.1:58490, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T20:47:26,970 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [51ca66f7ee3c,34653,1731357995580] 2024-11-11T20:47:26,970 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/51ca66f7ee3c,34653,1731357995580 already deleted, retry=false 2024-11-11T20:47:26,970 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 51ca66f7ee3c,34653,1731357995580 expired; onlineServers=0 2024-11-11T20:47:26,970 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '51ca66f7ee3c,42863,1731357995536' ***** 2024-11-11T20:47:26,970 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-11T20:47:26,970 INFO [M:0;51ca66f7ee3c:42863 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T20:47:26,970 INFO [M:0;51ca66f7ee3c:42863 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T20:47:26,970 DEBUG [M:0;51ca66f7ee3c:42863 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-11T20:47:26,970 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
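The ZKWatcher/ZKUtil entries in this shutdown sequence ("Set watcher on znode that does not yet exist, /hbase/running" earlier, and the NodeDeleted event for the region server's ephemeral /hbase/rs znode above) rely on ZooKeeper's exists() call registering a watch even when the target znode is absent, so a later create or delete still fires an event. A minimal sketch of that pattern with the raw ZooKeeper client; the connect string, timeout, and wait are illustrative:

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RunningZnodeWatchSketch {
  public static void main(String[] args) throws Exception {
    // Session-level watcher (ignored here); connect string is illustrative.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> { });

    Watcher watcher = (WatchedEvent event) ->
        System.out.println("event " + event.getType() + " on " + event.getPath());

    // Returns null when the znode is absent, but the watch is still registered,
    // so a later NodeCreated/NodeDeleted on /hbase/running is delivered.
    zk.exists("/hbase/running", watcher);

    Thread.sleep(60_000);  // keep the session alive so the one-shot watch can fire
    zk.close();
  }
}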
2024-11-11T20:47:26,970 DEBUG [M:0;51ca66f7ee3c:42863 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-11T20:47:26,970 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster-HFileCleaner.large.0-1731357995726 {}] cleaner.HFileCleaner(306): Exit Thread[master/51ca66f7ee3c:0:becomeActiveMaster-HFileCleaner.large.0-1731357995726,5,FailOnTimeoutGroup] 2024-11-11T20:47:26,971 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster-HFileCleaner.small.0-1731357995726 {}] cleaner.HFileCleaner(306): Exit Thread[master/51ca66f7ee3c:0:becomeActiveMaster-HFileCleaner.small.0-1731357995726,5,FailOnTimeoutGroup] 2024-11-11T20:47:26,971 INFO [M:0;51ca66f7ee3c:42863 {}] hbase.ChoreService(370): Chore service for: master/51ca66f7ee3c:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-11T20:47:26,971 INFO [M:0;51ca66f7ee3c:42863 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T20:47:26,971 DEBUG [M:0;51ca66f7ee3c:42863 {}] master.HMaster(1795): Stopping service threads 2024-11-11T20:47:26,971 INFO [M:0;51ca66f7ee3c:42863 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-11T20:47:26,971 INFO [M:0;51ca66f7ee3c:42863 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T20:47:26,971 INFO [M:0;51ca66f7ee3c:42863 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-11T20:47:26,971 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-11T20:47:26,971 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42863-0x1003089ce700000, quorum=127.0.0.1:58490, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-11T20:47:26,971 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42863-0x1003089ce700000, quorum=127.0.0.1:58490, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:47:26,971 DEBUG [M:0;51ca66f7ee3c:42863 {}] zookeeper.ZKUtil(347): master:42863-0x1003089ce700000, quorum=127.0.0.1:58490, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-11T20:47:26,971 WARN [M:0;51ca66f7ee3c:42863 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-11T20:47:26,972 INFO [M:0;51ca66f7ee3c:42863 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/.lastflushedseqids 2024-11-11T20:47:26,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41687 is added to blk_1073741849_1025 (size=130) 2024-11-11T20:47:26,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43291 is added to blk_1073741849_1025 (size=130) 2024-11-11T20:47:26,977 INFO [M:0;51ca66f7ee3c:42863 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-11T20:47:26,977 INFO [M:0;51ca66f7ee3c:42863 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-11T20:47:26,977 DEBUG [M:0;51ca66f7ee3c:42863 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T20:47:26,977 INFO [M:0;51ca66f7ee3c:42863 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T20:47:26,977 DEBUG [M:0;51ca66f7ee3c:42863 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T20:47:26,977 DEBUG [M:0;51ca66f7ee3c:42863 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T20:47:26,977 DEBUG [M:0;51ca66f7ee3c:42863 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T20:47:26,977 INFO [M:0;51ca66f7ee3c:42863 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.57 KB heapSize=54.96 KB 2024-11-11T20:47:26,991 DEBUG [M:0;51ca66f7ee3c:42863 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e07ab683905b4b6d967101bbd4485b6f is 82, key is hbase:meta,,1/info:regioninfo/1731357996367/Put/seqid=0 2024-11-11T20:47:26,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43291 is added to blk_1073741850_1026 (size=5672) 2024-11-11T20:47:26,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41687 is added to blk_1073741850_1026 (size=5672) 2024-11-11T20:47:26,996 INFO [M:0;51ca66f7ee3c:42863 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e07ab683905b4b6d967101bbd4485b6f 2024-11-11T20:47:27,015 DEBUG [M:0;51ca66f7ee3c:42863 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5f2136e9d0894616882875b3e7528713 is 798, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731357996798/Put/seqid=0 2024-11-11T20:47:27,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41687 is added to blk_1073741851_1027 (size=7820) 2024-11-11T20:47:27,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43291 is added to blk_1073741851_1027 (size=7820) 2024-11-11T20:47:27,020 INFO [M:0;51ca66f7ee3c:42863 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.96 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5f2136e9d0894616882875b3e7528713 2024-11-11T20:47:27,025 INFO [M:0;51ca66f7ee3c:42863 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 5f2136e9d0894616882875b3e7528713 2024-11-11T20:47:27,038 DEBUG [M:0;51ca66f7ee3c:42863 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/de2ec171403e413483ee4c43d937dbbe is 69, key is 51ca66f7ee3c,34653,1731357995580/rs:state/1731357995823/Put/seqid=0 2024-11-11T20:47:27,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43291 is added to blk_1073741852_1028 (size=5156) 2024-11-11T20:47:27,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41687 is added to blk_1073741852_1028 (size=5156) 2024-11-11T20:47:27,043 INFO [M:0;51ca66f7ee3c:42863 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/de2ec171403e413483ee4c43d937dbbe 2024-11-11T20:47:27,060 DEBUG [M:0;51ca66f7ee3c:42863 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/988041a5a0914a28a0517fc42d20f90a is 52, key is load_balancer_on/state:d/1731357996411/Put/seqid=0 2024-11-11T20:47:27,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41687 is added to blk_1073741853_1029 (size=5056) 2024-11-11T20:47:27,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43291 is added to blk_1073741853_1029 (size=5056) 2024-11-11T20:47:27,066 INFO [M:0;51ca66f7ee3c:42863 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/988041a5a0914a28a0517fc42d20f90a 2024-11-11T20:47:27,070 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34653-0x1003089ce700001, quorum=127.0.0.1:58490, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T20:47:27,070 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34653-0x1003089ce700001, quorum=127.0.0.1:58490, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T20:47:27,070 INFO [RS:0;51ca66f7ee3c:34653 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T20:47:27,070 INFO [RS:0;51ca66f7ee3c:34653 {}] regionserver.HRegionServer(1031): Exiting; stopping=51ca66f7ee3c,34653,1731357995580; zookeeper connection closed. 
2024-11-11T20:47:27,070 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@fc7ef3b {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@fc7ef3b 2024-11-11T20:47:27,070 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-11T20:47:27,071 DEBUG [M:0;51ca66f7ee3c:42863 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e07ab683905b4b6d967101bbd4485b6f as hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e07ab683905b4b6d967101bbd4485b6f 2024-11-11T20:47:27,076 INFO [M:0;51ca66f7ee3c:42863 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e07ab683905b4b6d967101bbd4485b6f, entries=8, sequenceid=121, filesize=5.5 K 2024-11-11T20:47:27,077 DEBUG [M:0;51ca66f7ee3c:42863 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5f2136e9d0894616882875b3e7528713 as hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5f2136e9d0894616882875b3e7528713 2024-11-11T20:47:27,082 INFO [M:0;51ca66f7ee3c:42863 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 5f2136e9d0894616882875b3e7528713 2024-11-11T20:47:27,082 INFO [M:0;51ca66f7ee3c:42863 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5f2136e9d0894616882875b3e7528713, entries=14, sequenceid=121, filesize=7.6 K 2024-11-11T20:47:27,083 DEBUG [M:0;51ca66f7ee3c:42863 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/de2ec171403e413483ee4c43d937dbbe as hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/de2ec171403e413483ee4c43d937dbbe 2024-11-11T20:47:27,088 INFO [M:0;51ca66f7ee3c:42863 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/de2ec171403e413483ee4c43d937dbbe, entries=1, sequenceid=121, filesize=5.0 K 2024-11-11T20:47:27,089 DEBUG [M:0;51ca66f7ee3c:42863 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/988041a5a0914a28a0517fc42d20f90a as hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/988041a5a0914a28a0517fc42d20f90a 2024-11-11T20:47:27,093 INFO [M:0;51ca66f7ee3c:42863 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39195/user/jenkins/test-data/7395ca11-80c9-4356-804b-24f241c0082d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/988041a5a0914a28a0517fc42d20f90a, entries=1, sequenceid=121, filesize=4.9 K 2024-11-11T20:47:27,094 INFO [M:0;51ca66f7ee3c:42863 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.57 KB/44611, heapSize ~54.90 KB/56216, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 117ms, sequenceid=121, compaction requested=false 2024-11-11T20:47:27,096 INFO [M:0;51ca66f7ee3c:42863 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T20:47:27,096 DEBUG [M:0;51ca66f7ee3c:42863 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731358046977Disabling compacts and flushes for region at 1731358046977Disabling writes for close at 1731358046977Obtaining lock to block concurrent updates at 1731358046977Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731358046977Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44611, getHeapSize=56216, getOffHeapSize=0, getCellsCount=140 at 1731358046978 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731358046978Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731358046978Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731358046990 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731358046991 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731358047001 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731358047015 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731358047015Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731358047025 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731358047037 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731358047037Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731358047047 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731358047060 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731358047060Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@23f88b9b: reopening flushed file at 1731358047070 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@a8e7047: reopening flushed file at 1731358047076 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1b2b85a4: reopening flushed file at 1731358047082 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@20d51ebd: reopening flushed file at 1731358047088 (+6 ms)Finished flush of dataSize ~43.57 KB/44611, heapSize ~54.90 KB/56216, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 117ms, sequenceid=121, compaction requested=false at 1731358047094 (+6 ms)Writing region close event to WAL at 1731358047096 (+2 ms)Closed at 1731358047096 2024-11-11T20:47:27,096 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:47:27,096 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:47:27,096 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:47:27,097 INFO [sync.3 {}] 
wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:47:27,097 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:47:27,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43291 is added to blk_1073741830_1006 (size=53008) 2024-11-11T20:47:27,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41687 is added to blk_1073741830_1006 (size=53008) 2024-11-11T20:47:27,099 INFO [M:0;51ca66f7ee3c:42863 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-11T20:47:27,099 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-11T20:47:27,099 INFO [M:0;51ca66f7ee3c:42863 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42863 2024-11-11T20:47:27,099 INFO [M:0;51ca66f7ee3c:42863 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T20:47:27,201 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42863-0x1003089ce700000, quorum=127.0.0.1:58490, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T20:47:27,201 INFO [M:0;51ca66f7ee3c:42863 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T20:47:27,201 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42863-0x1003089ce700000, quorum=127.0.0.1:58490, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T20:47:27,205 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5ca63b58{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T20:47:27,205 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2719a663{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T20:47:27,205 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T20:47:27,205 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@e21aaf2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T20:47:27,206 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@222377c1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd806552-9445-b77f-3c8d-84b37576ad76/hadoop.log.dir/,STOPPED} 2024-11-11T20:47:27,208 WARN [BP-1837961949-172.17.0.2-1731357994975 heartbeating to localhost/127.0.0.1:39195 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T20:47:27,208 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T20:47:27,208 WARN [BP-1837961949-172.17.0.2-1731357994975 heartbeating to localhost/127.0.0.1:39195 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1837961949-172.17.0.2-1731357994975 (Datanode Uuid 57a8f8c0-e0d1-4ab0-8cf2-b22a9c7c64f5) service to localhost/127.0.0.1:39195 2024-11-11T20:47:27,208 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T20:47:27,209 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd806552-9445-b77f-3c8d-84b37576ad76/cluster_83adb649-b2fb-daa2-0c63-27c767e38a69/data/data3/current/BP-1837961949-172.17.0.2-1731357994975 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T20:47:27,209 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd806552-9445-b77f-3c8d-84b37576ad76/cluster_83adb649-b2fb-daa2-0c63-27c767e38a69/data/data4/current/BP-1837961949-172.17.0.2-1731357994975 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T20:47:27,209 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T20:47:27,211 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@20ad0bae{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T20:47:27,212 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@df90952{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T20:47:27,212 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T20:47:27,212 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21edc7fd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T20:47:27,212 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@30c9629f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd806552-9445-b77f-3c8d-84b37576ad76/hadoop.log.dir/,STOPPED} 2024-11-11T20:47:27,214 WARN [BP-1837961949-172.17.0.2-1731357994975 heartbeating to localhost/127.0.0.1:39195 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T20:47:27,214 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T20:47:27,214 WARN [BP-1837961949-172.17.0.2-1731357994975 heartbeating to localhost/127.0.0.1:39195 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1837961949-172.17.0.2-1731357994975 (Datanode Uuid 3e5b2e5d-d795-4486-9204-0538e1323898) service to localhost/127.0.0.1:39195 2024-11-11T20:47:27,214 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T20:47:27,214 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd806552-9445-b77f-3c8d-84b37576ad76/cluster_83adb649-b2fb-daa2-0c63-27c767e38a69/data/data1/current/BP-1837961949-172.17.0.2-1731357994975 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T20:47:27,215 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd806552-9445-b77f-3c8d-84b37576ad76/cluster_83adb649-b2fb-daa2-0c63-27c767e38a69/data/data2/current/BP-1837961949-172.17.0.2-1731357994975 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T20:47:27,215 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T20:47:27,221 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3cb009a1{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T20:47:27,221 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6349ff4f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T20:47:27,221 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T20:47:27,221 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a5372a5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T20:47:27,221 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@642a12de{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd806552-9445-b77f-3c8d-84b37576ad76/hadoop.log.dir/,STOPPED} 2024-11-11T20:47:27,227 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-11T20:47:27,246 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-11T20:47:27,253 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=205 (was 180) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39195 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39195 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39195 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:39195 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39195 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:39195 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/51ca66f7ee3c:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39195 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39195 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=483 (was 455) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=168 (was 127) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=4117 (was 3975) - AvailableMemoryMB LEAK? - 2024-11-11T20:47:27,260 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=205, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=168, ProcessCount=11, AvailableMemoryMB=4117 2024-11-11T20:47:27,260 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-11T20:47:27,260 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd806552-9445-b77f-3c8d-84b37576ad76/hadoop.log.dir so I do NOT create it in target/test-data/3cea88a2-fa2b-d370-7d01-ef4842d6e027 2024-11-11T20:47:27,260 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd806552-9445-b77f-3c8d-84b37576ad76/hadoop.tmp.dir so I do NOT create it in target/test-data/3cea88a2-fa2b-d370-7d01-ef4842d6e027 2024-11-11T20:47:27,261 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cea88a2-fa2b-d370-7d01-ef4842d6e027/cluster_a3c4b165-486e-3dcf-597e-3a27b29683f1, deleteOnExit=true 2024-11-11T20:47:27,261 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-11T20:47:27,261 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cea88a2-fa2b-d370-7d01-ef4842d6e027/test.cache.data in system properties and HBase conf 2024-11-11T20:47:27,261 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cea88a2-fa2b-d370-7d01-ef4842d6e027/hadoop.tmp.dir in system properties and HBase conf 2024-11-11T20:47:27,261 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cea88a2-fa2b-d370-7d01-ef4842d6e027/hadoop.log.dir in system properties and HBase conf 2024-11-11T20:47:27,261 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cea88a2-fa2b-d370-7d01-ef4842d6e027/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-11T20:47:27,261 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cea88a2-fa2b-d370-7d01-ef4842d6e027/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-11T20:47:27,261 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-11T20:47:27,261 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-11T20:47:27,261 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cea88a2-fa2b-d370-7d01-ef4842d6e027/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-11T20:47:27,261 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cea88a2-fa2b-d370-7d01-ef4842d6e027/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-11T20:47:27,261 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cea88a2-fa2b-d370-7d01-ef4842d6e027/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-11T20:47:27,262 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cea88a2-fa2b-d370-7d01-ef4842d6e027/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T20:47:27,262 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cea88a2-fa2b-d370-7d01-ef4842d6e027/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-11T20:47:27,262 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cea88a2-fa2b-d370-7d01-ef4842d6e027/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-11T20:47:27,262 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cea88a2-fa2b-d370-7d01-ef4842d6e027/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T20:47:27,262 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cea88a2-fa2b-d370-7d01-ef4842d6e027/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T20:47:27,262 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cea88a2-fa2b-d370-7d01-ef4842d6e027/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-11T20:47:27,262 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cea88a2-fa2b-d370-7d01-ef4842d6e027/nfs.dump.dir in system properties and HBase conf 2024-11-11T20:47:27,262 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cea88a2-fa2b-d370-7d01-ef4842d6e027/java.io.tmpdir in system properties and HBase conf 2024-11-11T20:47:27,262 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cea88a2-fa2b-d370-7d01-ef4842d6e027/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T20:47:27,262 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cea88a2-fa2b-d370-7d01-ef4842d6e027/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-11T20:47:27,262 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cea88a2-fa2b-d370-7d01-ef4842d6e027/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-11T20:47:27,276 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-11T20:47:27,320 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T20:47:27,323 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T20:47:27,325 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T20:47:27,325 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T20:47:27,325 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-11T20:47:27,325 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T20:47:27,326 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3aae5916{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cea88a2-fa2b-d370-7d01-ef4842d6e027/hadoop.log.dir/,AVAILABLE} 2024-11-11T20:47:27,326 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5f3f4eb4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T20:47:27,419 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@d956b{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cea88a2-fa2b-d370-7d01-ef4842d6e027/java.io.tmpdir/jetty-localhost-42211-hadoop-hdfs-3_4_1-tests_jar-_-any-13002649473846146658/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T20:47:27,420 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@59c539e7{HTTP/1.1, (http/1.1)}{localhost:42211} 2024-11-11T20:47:27,420 INFO [Time-limited test {}] server.Server(415): Started @236644ms 2024-11-11T20:47:27,432 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-11T20:47:27,469 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T20:47:27,472 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T20:47:27,473 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T20:47:27,473 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T20:47:27,473 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-11T20:47:27,473 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5327e2a9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cea88a2-fa2b-d370-7d01-ef4842d6e027/hadoop.log.dir/,AVAILABLE} 2024-11-11T20:47:27,474 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@408fd242{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T20:47:27,572 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2d8c7847{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cea88a2-fa2b-d370-7d01-ef4842d6e027/java.io.tmpdir/jetty-localhost-36859-hadoop-hdfs-3_4_1-tests_jar-_-any-8116121632892769431/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T20:47:27,572 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@45890504{HTTP/1.1, (http/1.1)}{localhost:36859} 2024-11-11T20:47:27,572 INFO [Time-limited test {}] server.Server(415): Started @236797ms 2024-11-11T20:47:27,573 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T20:47:27,601 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T20:47:27,604 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T20:47:27,604 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T20:47:27,604 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T20:47:27,605 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T20:47:27,605 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@44a25975{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cea88a2-fa2b-d370-7d01-ef4842d6e027/hadoop.log.dir/,AVAILABLE} 2024-11-11T20:47:27,605 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7cf57c4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T20:47:27,652 WARN [Thread-1954 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cea88a2-fa2b-d370-7d01-ef4842d6e027/cluster_a3c4b165-486e-3dcf-597e-3a27b29683f1/data/data1/current/BP-1277814845-172.17.0.2-1731358047280/current, will proceed with Du for space computation calculation, 2024-11-11T20:47:27,652 WARN [Thread-1955 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cea88a2-fa2b-d370-7d01-ef4842d6e027/cluster_a3c4b165-486e-3dcf-597e-3a27b29683f1/data/data2/current/BP-1277814845-172.17.0.2-1731358047280/current, will proceed with Du for space computation calculation, 2024-11-11T20:47:27,671 WARN [Thread-1933 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T20:47:27,673 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe4ff140a83f23657 with lease ID 0x2fc94d332badd800: Processing first storage report for DS-e87af059-c04f-4086-bede-167a1794e837 from datanode DatanodeRegistration(127.0.0.1:33809, datanodeUuid=fb8a8c10-9d3e-4052-a3f9-56e949d8d679, infoPort=33773, infoSecurePort=0, ipcPort=35621, storageInfo=lv=-57;cid=testClusterID;nsid=1948439118;c=1731358047280) 2024-11-11T20:47:27,674 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe4ff140a83f23657 with lease ID 0x2fc94d332badd800: from storage DS-e87af059-c04f-4086-bede-167a1794e837 node DatanodeRegistration(127.0.0.1:33809, datanodeUuid=fb8a8c10-9d3e-4052-a3f9-56e949d8d679, infoPort=33773, infoSecurePort=0, ipcPort=35621, storageInfo=lv=-57;cid=testClusterID;nsid=1948439118;c=1731358047280), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T20:47:27,674 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe4ff140a83f23657 with lease ID 0x2fc94d332badd800: Processing first storage report for DS-cffb1232-de2f-4384-acaf-fd885921049b from datanode DatanodeRegistration(127.0.0.1:33809, datanodeUuid=fb8a8c10-9d3e-4052-a3f9-56e949d8d679, infoPort=33773, infoSecurePort=0, ipcPort=35621, storageInfo=lv=-57;cid=testClusterID;nsid=1948439118;c=1731358047280) 2024-11-11T20:47:27,674 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe4ff140a83f23657 with lease ID 0x2fc94d332badd800: from storage DS-cffb1232-de2f-4384-acaf-fd885921049b node DatanodeRegistration(127.0.0.1:33809, datanodeUuid=fb8a8c10-9d3e-4052-a3f9-56e949d8d679, infoPort=33773, infoSecurePort=0, ipcPort=35621, storageInfo=lv=-57;cid=testClusterID;nsid=1948439118;c=1731358047280), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T20:47:27,724 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5689196f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cea88a2-fa2b-d370-7d01-ef4842d6e027/java.io.tmpdir/jetty-localhost-35299-hadoop-hdfs-3_4_1-tests_jar-_-any-11240518545269671802/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T20:47:27,725 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@9612b29{HTTP/1.1, (http/1.1)}{localhost:35299} 2024-11-11T20:47:27,725 INFO [Time-limited test {}] server.Server(415): Started @236949ms 2024-11-11T20:47:27,726 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
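For context: the entries above show the test's fresh HDFS minicluster coming up for testLogRolling — Jetty web contexts for the namenode and the first datanode start, and the Block report processor registers the datanode's two storages. A minimal sketch of how such a two-datanode cluster is typically brought up in a Hadoop test, assuming the MiniDFSCluster builder API shipped in the hadoop-hdfs tests jar referenced in the log (the class name, base directory, and paths below are illustrative and not taken from this run):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class MiniDfsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Keep test data under a scratch directory, analogous to the
        // target/test-data/... layout in the log (this path is illustrative).
        conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, "/tmp/minidfs-sketch");

        // Two datanodes, matching numDataNodes=2 in the option printed earlier.
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(2)
            .build();
        try {
          cluster.waitActive();                       // wait for block reports, as above
          FileSystem fs = cluster.getFileSystem();
          fs.mkdirs(new Path("/user/jenkins/test-data"));
          System.out.println("NameNode RPC port: " + cluster.getNameNodePort());
        } finally {
          cluster.shutdown();
        }
      }
    }
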
2024-11-11T20:47:27,782 WARN [Thread-1981 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cea88a2-fa2b-d370-7d01-ef4842d6e027/cluster_a3c4b165-486e-3dcf-597e-3a27b29683f1/data/data4/current/BP-1277814845-172.17.0.2-1731358047280/current, will proceed with Du for space computation calculation, 2024-11-11T20:47:27,782 WARN [Thread-1980 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cea88a2-fa2b-d370-7d01-ef4842d6e027/cluster_a3c4b165-486e-3dcf-597e-3a27b29683f1/data/data3/current/BP-1277814845-172.17.0.2-1731358047280/current, will proceed with Du for space computation calculation, 2024-11-11T20:47:27,798 WARN [Thread-1969 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-11T20:47:27,799 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x50b41a6c0cfd144a with lease ID 0x2fc94d332badd801: Processing first storage report for DS-34186969-9627-4d44-9f87-35a6bb2515b5 from datanode DatanodeRegistration(127.0.0.1:38615, datanodeUuid=bedd0fd8-f18f-42cc-980c-68fad6ce366c, infoPort=35391, infoSecurePort=0, ipcPort=45567, storageInfo=lv=-57;cid=testClusterID;nsid=1948439118;c=1731358047280) 2024-11-11T20:47:27,799 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x50b41a6c0cfd144a with lease ID 0x2fc94d332badd801: from storage DS-34186969-9627-4d44-9f87-35a6bb2515b5 node DatanodeRegistration(127.0.0.1:38615, datanodeUuid=bedd0fd8-f18f-42cc-980c-68fad6ce366c, infoPort=35391, infoSecurePort=0, ipcPort=45567, storageInfo=lv=-57;cid=testClusterID;nsid=1948439118;c=1731358047280), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T20:47:27,799 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x50b41a6c0cfd144a with lease ID 0x2fc94d332badd801: Processing first storage report for DS-8344656a-7b9f-4df3-b0f8-a1ca467498c1 from datanode DatanodeRegistration(127.0.0.1:38615, datanodeUuid=bedd0fd8-f18f-42cc-980c-68fad6ce366c, infoPort=35391, infoSecurePort=0, ipcPort=45567, storageInfo=lv=-57;cid=testClusterID;nsid=1948439118;c=1731358047280) 2024-11-11T20:47:27,800 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x50b41a6c0cfd144a with lease ID 0x2fc94d332badd801: from storage DS-8344656a-7b9f-4df3-b0f8-a1ca467498c1 node DatanodeRegistration(127.0.0.1:38615, datanodeUuid=bedd0fd8-f18f-42cc-980c-68fad6ce366c, infoPort=35391, infoSecurePort=0, ipcPort=45567, storageInfo=lv=-57;cid=testClusterID;nsid=1948439118;c=1731358047280), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T20:47:27,836 INFO [regionserver/51ca66f7ee3c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T20:47:27,846 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cea88a2-fa2b-d370-7d01-ef4842d6e027 2024-11-11T20:47:27,849 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, 
dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cea88a2-fa2b-d370-7d01-ef4842d6e027/cluster_a3c4b165-486e-3dcf-597e-3a27b29683f1/zookeeper_0, clientPort=63469, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cea88a2-fa2b-d370-7d01-ef4842d6e027/cluster_a3c4b165-486e-3dcf-597e-3a27b29683f1/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cea88a2-fa2b-d370-7d01-ef4842d6e027/cluster_a3c4b165-486e-3dcf-597e-3a27b29683f1/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-11T20:47:27,851 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=63469 2024-11-11T20:47:27,851 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:47:27,853 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:47:27,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741825_1001 (size=7) 2024-11-11T20:47:27,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741825_1001 (size=7) 2024-11-11T20:47:27,863 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb with version=8 2024-11-11T20:47:27,863 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/hbase-staging 2024-11-11T20:47:27,866 INFO [Time-limited test {}] client.ConnectionUtils(128): master/51ca66f7ee3c:0 server-side Connection retries=45 2024-11-11T20:47:27,866 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T20:47:27,866 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T20:47:27,866 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T20:47:27,866 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T20:47:27,866 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T20:47:27,867 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, 
hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-11T20:47:27,867 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T20:47:27,868 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36227 2024-11-11T20:47:27,870 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:36227 connecting to ZooKeeper ensemble=127.0.0.1:63469 2024-11-11T20:47:27,874 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:362270x0, quorum=127.0.0.1:63469, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T20:47:27,874 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36227-0x100308a9ad70000 connected 2024-11-11T20:47:27,888 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:47:27,889 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:47:27,890 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36227-0x100308a9ad70000, quorum=127.0.0.1:63469, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T20:47:27,890 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb, hbase.cluster.distributed=false 2024-11-11T20:47:27,892 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36227-0x100308a9ad70000, quorum=127.0.0.1:63469, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T20:47:27,892 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36227 2024-11-11T20:47:27,892 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36227 2024-11-11T20:47:27,892 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36227 2024-11-11T20:47:27,893 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36227 2024-11-11T20:47:27,893 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36227 2024-11-11T20:47:27,907 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/51ca66f7ee3c:0 server-side Connection retries=45 2024-11-11T20:47:27,907 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T20:47:27,907 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T20:47:27,907 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 
2024-11-11T20:47:27,907 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T20:47:27,907 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T20:47:27,907 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-11T20:47:27,908 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T20:47:27,908 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45533 2024-11-11T20:47:27,910 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45533 connecting to ZooKeeper ensemble=127.0.0.1:63469 2024-11-11T20:47:27,911 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:47:27,912 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:47:27,916 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:455330x0, quorum=127.0.0.1:63469, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T20:47:27,916 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45533-0x100308a9ad70001 connected 2024-11-11T20:47:27,916 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45533-0x100308a9ad70001, quorum=127.0.0.1:63469, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T20:47:27,916 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-11T20:47:27,917 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-11T20:47:27,917 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45533-0x100308a9ad70001, quorum=127.0.0.1:63469, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-11T20:47:27,918 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45533-0x100308a9ad70001, quorum=127.0.0.1:63469, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T20:47:27,919 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45533 2024-11-11T20:47:27,919 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45533 2024-11-11T20:47:27,919 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45533 2024-11-11T20:47:27,919 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45533 
2024-11-11T20:47:27,919 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45533
2024-11-11T20:47:27,923 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-11T20:47:27,923 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-11T20:47:27,930 DEBUG [M:0;51ca66f7ee3c:36227 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;51ca66f7ee3c:36227 2024-11-11T20:47:27,930 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/51ca66f7ee3c,36227,1731358047866 2024-11-11T20:47:27,931 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36227-0x100308a9ad70000, quorum=127.0.0.1:63469, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T20:47:27,931 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45533-0x100308a9ad70001, quorum=127.0.0.1:63469, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T20:47:27,931 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36227-0x100308a9ad70000, quorum=127.0.0.1:63469, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/51ca66f7ee3c,36227,1731358047866 2024-11-11T20:47:27,933 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45533-0x100308a9ad70001, quorum=127.0.0.1:63469, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-11T20:47:27,933 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36227-0x100308a9ad70000, quorum=127.0.0.1:63469, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:47:27,933 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45533-0x100308a9ad70001, quorum=127.0.0.1:63469, baseZNode=/hbase Received ZooKeeper Event,
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:47:27,934 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36227-0x100308a9ad70000, quorum=127.0.0.1:63469, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-11T20:47:27,934 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/51ca66f7ee3c,36227,1731358047866 from backup master directory 2024-11-11T20:47:27,935 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45533-0x100308a9ad70001, quorum=127.0.0.1:63469, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T20:47:27,935 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36227-0x100308a9ad70000, quorum=127.0.0.1:63469, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/51ca66f7ee3c,36227,1731358047866 2024-11-11T20:47:27,935 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36227-0x100308a9ad70000, quorum=127.0.0.1:63469, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T20:47:27,935 WARN [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-11T20:47:27,935 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=51ca66f7ee3c,36227,1731358047866 2024-11-11T20:47:27,938 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/hbase.id] with ID: 7b5825e9-0807-4838-9f35-2e3ba6f3bfde 2024-11-11T20:47:27,939 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/.tmp/hbase.id 2024-11-11T20:47:27,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741826_1002 (size=42) 2024-11-11T20:47:27,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741826_1002 (size=42) 2024-11-11T20:47:27,947 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/.tmp/hbase.id]:[hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/hbase.id] 2024-11-11T20:47:27,959 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:47:27,959 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-11T20:47:27,961 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-11T20:47:27,962 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36227-0x100308a9ad70000, quorum=127.0.0.1:63469, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:47:27,962 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45533-0x100308a9ad70001, quorum=127.0.0.1:63469, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:47:27,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741827_1003 (size=196) 2024-11-11T20:47:27,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741827_1003 (size=196) 2024-11-11T20:47:27,969 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T20:47:27,970 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-11T20:47:27,970 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T20:47:27,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741828_1004 (size=1189) 2024-11-11T20:47:27,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741828_1004 (size=1189) 2024-11-11T20:47:27,978 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/MasterData/data/master/store 2024-11-11T20:47:27,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741829_1005 (size=34) 2024-11-11T20:47:27,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741829_1005 (size=34) 2024-11-11T20:47:27,984 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T20:47:27,985 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T20:47:27,985 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T20:47:27,985 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T20:47:27,985 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T20:47:27,985 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T20:47:27,985 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-11T20:47:27,985 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731358047985Disabling compacts and flushes for region at 1731358047985Disabling writes for close at 1731358047985Writing region close event to WAL at 1731358047985Closed at 1731358047985 2024-11-11T20:47:27,986 WARN [master/51ca66f7ee3c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/MasterData/data/master/store/.initializing 2024-11-11T20:47:27,986 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/MasterData/WALs/51ca66f7ee3c,36227,1731358047866 2024-11-11T20:47:27,988 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=51ca66f7ee3c%2C36227%2C1731358047866, suffix=, logDir=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/MasterData/WALs/51ca66f7ee3c,36227,1731358047866, archiveDir=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/MasterData/oldWALs, maxLogs=10 2024-11-11T20:47:27,989 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 51ca66f7ee3c%2C36227%2C1731358047866.1731358047988 2024-11-11T20:47:27,993 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/MasterData/WALs/51ca66f7ee3c,36227,1731358047866/51ca66f7ee3c%2C36227%2C1731358047866.1731358047988 2024-11-11T20:47:27,994 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33773:33773),(127.0.0.1/127.0.0.1:35391:35391)] 2024-11-11T20:47:27,994 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-11T20:47:27,994 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T20:47:27,994 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:47:27,994 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:47:27,995 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:47:27,996 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-11T20:47:27,996 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:47:27,997 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:47:27,997 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:47:27,998 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-11T20:47:27,998 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:47:27,998 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T20:47:27,998 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:47:27,999 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-11T20:47:27,999 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:47:28,000 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T20:47:28,000 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:47:28,001 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-11T20:47:28,001 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:47:28,001 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T20:47:28,001 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:47:28,002 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:47:28,002 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:47:28,004 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:47:28,004 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:47:28,004 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-11T20:47:28,005 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:47:28,008 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T20:47:28,009 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=856724, jitterRate=0.08938100934028625}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-11T20:47:28,009 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731358047994Initializing all the Stores at 1731358047995 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731358047995Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731358047995Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731358047995Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731358047995Cleaning up temporary data from old regions at 1731358048004 (+9 ms)Region opened successfully at 1731358048009 (+5 ms) 2024-11-11T20:47:28,009 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-11T20:47:28,012 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51289244, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=51ca66f7ee3c/172.17.0.2:0 2024-11-11T20:47:28,013 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-11T20:47:28,013 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-11T20:47:28,013 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-11T20:47:28,013 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-11T20:47:28,014 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-11T20:47:28,014 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-11T20:47:28,014 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-11T20:47:28,016 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-11T20:47:28,017 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36227-0x100308a9ad70000, quorum=127.0.0.1:63469, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-11T20:47:28,017 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-11T20:47:28,018 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-11T20:47:28,018 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36227-0x100308a9ad70000, quorum=127.0.0.1:63469, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-11T20:47:28,019 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-11T20:47:28,019 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-11T20:47:28,020 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36227-0x100308a9ad70000, quorum=127.0.0.1:63469, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-11T20:47:28,021 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-11T20:47:28,021 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36227-0x100308a9ad70000, quorum=127.0.0.1:63469, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-11T20:47:28,022 DEBUG 
[master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-11T20:47:28,023 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36227-0x100308a9ad70000, quorum=127.0.0.1:63469, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-11T20:47:28,024 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-11T20:47:28,025 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36227-0x100308a9ad70000, quorum=127.0.0.1:63469, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T20:47:28,025 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36227-0x100308a9ad70000, quorum=127.0.0.1:63469, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:47:28,025 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45533-0x100308a9ad70001, quorum=127.0.0.1:63469, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T20:47:28,025 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45533-0x100308a9ad70001, quorum=127.0.0.1:63469, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:47:28,026 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=51ca66f7ee3c,36227,1731358047866, sessionid=0x100308a9ad70000, setting cluster-up flag (Was=false) 2024-11-11T20:47:28,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45533-0x100308a9ad70001, quorum=127.0.0.1:63469, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:47:28,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36227-0x100308a9ad70000, quorum=127.0.0.1:63469, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:47:28,029 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-11T20:47:28,030 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=51ca66f7ee3c,36227,1731358047866 2024-11-11T20:47:28,032 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45533-0x100308a9ad70001, quorum=127.0.0.1:63469, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:47:28,032 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36227-0x100308a9ad70000, quorum=127.0.0.1:63469, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:47:28,035 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-11T20:47:28,035 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=51ca66f7ee3c,36227,1731358047866 2024-11-11T20:47:28,036 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-11T20:47:28,038 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-11T20:47:28,038 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-11T20:47:28,038 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-11T20:47:28,038 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 51ca66f7ee3c,36227,1731358047866 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-11T20:47:28,040 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/51ca66f7ee3c:0, corePoolSize=5, maxPoolSize=5 2024-11-11T20:47:28,040 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/51ca66f7ee3c:0, corePoolSize=5, maxPoolSize=5 2024-11-11T20:47:28,040 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/51ca66f7ee3c:0, corePoolSize=5, maxPoolSize=5 2024-11-11T20:47:28,040 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/51ca66f7ee3c:0, corePoolSize=5, maxPoolSize=5 2024-11-11T20:47:28,040 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/51ca66f7ee3c:0, corePoolSize=10, maxPoolSize=10 2024-11-11T20:47:28,040 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:47:28,040 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/51ca66f7ee3c:0, corePoolSize=2, maxPoolSize=2 2024-11-11T20:47:28,040 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/51ca66f7ee3c:0, corePoolSize=1, 
maxPoolSize=1 2024-11-11T20:47:28,042 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731358078042 2024-11-11T20:47:28,042 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-11T20:47:28,042 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T20:47:28,042 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-11T20:47:28,042 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-11T20:47:28,042 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-11T20:47:28,042 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-11T20:47:28,042 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-11T20:47:28,042 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-11T20:47:28,043 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-11T20:47:28,043 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-11T20:47:28,043 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-11T20:47:28,043 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-11T20:47:28,043 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:47:28,044 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-11T20:47:28,044 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-11T20:47:28,044 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-11T20:47:28,045 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/51ca66f7ee3c:0:becomeActiveMaster-HFileCleaner.large.0-1731358048044,5,FailOnTimeoutGroup] 2024-11-11T20:47:28,048 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/51ca66f7ee3c:0:becomeActiveMaster-HFileCleaner.small.0-1731358048045,5,FailOnTimeoutGroup] 2024-11-11T20:47:28,048 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T20:47:28,049 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-11T20:47:28,049 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-11T20:47:28,049 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-11T20:47:28,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741831_1007 (size=1321) 2024-11-11T20:47:28,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741831_1007 (size=1321) 2024-11-11T20:47:28,059 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-11T20:47:28,059 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb 2024-11-11T20:47:28,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741832_1008 (size=32) 2024-11-11T20:47:28,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741832_1008 (size=32) 2024-11-11T20:47:28,066 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T20:47:28,067 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T20:47:28,068 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T20:47:28,068 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:47:28,069 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:47:28,069 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T20:47:28,070 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T20:47:28,070 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:47:28,070 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:47:28,071 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T20:47:28,071 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T20:47:28,071 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:47:28,072 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:47:28,072 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T20:47:28,073 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T20:47:28,073 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:47:28,073 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:47:28,073 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T20:47:28,074 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/hbase/meta/1588230740 2024-11-11T20:47:28,074 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/hbase/meta/1588230740 2024-11-11T20:47:28,075 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T20:47:28,075 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T20:47:28,076 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-11T20:47:28,076 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T20:47:28,078 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T20:47:28,079 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=785565, jitterRate=-0.0011036098003387451}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T20:47:28,080 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731358048066Initializing all the Stores at 1731358048066Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731358048066Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731358048067 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731358048067Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731358048067Cleaning up temporary data from old regions at 1731358048075 (+8 ms)Region opened successfully at 1731358048080 (+5 ms) 2024-11-11T20:47:28,080 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T20:47:28,080 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T20:47:28,080 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T20:47:28,080 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T20:47:28,080 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T20:47:28,080 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T20:47:28,080 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731358048080Disabling compacts and flushes for region at 
1731358048080Disabling writes for close at 1731358048080Writing region close event to WAL at 1731358048080Closed at 1731358048080 2024-11-11T20:47:28,081 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T20:47:28,081 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-11T20:47:28,081 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-11T20:47:28,083 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T20:47:28,084 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-11T20:47:28,121 INFO [RS:0;51ca66f7ee3c:45533 {}] regionserver.HRegionServer(746): ClusterId : 7b5825e9-0807-4838-9f35-2e3ba6f3bfde 2024-11-11T20:47:28,121 DEBUG [RS:0;51ca66f7ee3c:45533 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-11T20:47:28,123 DEBUG [RS:0;51ca66f7ee3c:45533 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-11T20:47:28,123 DEBUG [RS:0;51ca66f7ee3c:45533 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-11T20:47:28,125 DEBUG [RS:0;51ca66f7ee3c:45533 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-11T20:47:28,125 DEBUG [RS:0;51ca66f7ee3c:45533 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b681da4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=51ca66f7ee3c/172.17.0.2:0 2024-11-11T20:47:28,136 DEBUG [RS:0;51ca66f7ee3c:45533 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;51ca66f7ee3c:45533 2024-11-11T20:47:28,136 INFO [RS:0;51ca66f7ee3c:45533 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-11T20:47:28,136 INFO [RS:0;51ca66f7ee3c:45533 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-11T20:47:28,136 DEBUG [RS:0;51ca66f7ee3c:45533 {}] regionserver.HRegionServer(832): About to register with Master. 
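The entries above report the cluster id handed to the region server and the meta-assignment procedure getting under way. As a hedged aside (not part of the test output), the same information is visible from an ordinary client through Admin#getClusterMetrics; the class name ClusterStateProbe and the ZooKeeper quorum value below are placeholders.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClusterStateProbe {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");   // placeholder quorum
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          ClusterMetrics metrics = admin.getClusterMetrics();
          System.out.println("clusterId=" + metrics.getClusterId());
          System.out.println("master=" + metrics.getMasterName());
          for (ServerName rs : metrics.getLiveServerMetrics().keySet()) {
            System.out.println("regionserver=" + rs);      // e.g. 51ca66f7ee3c,45533,...
          }
        }
      }
    }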
2024-11-11T20:47:28,137 INFO [RS:0;51ca66f7ee3c:45533 {}] regionserver.HRegionServer(2659): reportForDuty to master=51ca66f7ee3c,36227,1731358047866 with port=45533, startcode=1731358047907 2024-11-11T20:47:28,137 DEBUG [RS:0;51ca66f7ee3c:45533 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-11T20:47:28,139 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51851, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-11T20:47:28,140 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36227 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 51ca66f7ee3c,45533,1731358047907 2024-11-11T20:47:28,140 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36227 {}] master.ServerManager(517): Registering regionserver=51ca66f7ee3c,45533,1731358047907 2024-11-11T20:47:28,142 DEBUG [RS:0;51ca66f7ee3c:45533 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb 2024-11-11T20:47:28,142 DEBUG [RS:0;51ca66f7ee3c:45533 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41561 2024-11-11T20:47:28,142 DEBUG [RS:0;51ca66f7ee3c:45533 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-11T20:47:28,143 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36227-0x100308a9ad70000, quorum=127.0.0.1:63469, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T20:47:28,143 DEBUG [RS:0;51ca66f7ee3c:45533 {}] zookeeper.ZKUtil(111): regionserver:45533-0x100308a9ad70001, quorum=127.0.0.1:63469, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/51ca66f7ee3c,45533,1731358047907 2024-11-11T20:47:28,143 WARN [RS:0;51ca66f7ee3c:45533 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-11T20:47:28,143 INFO [RS:0;51ca66f7ee3c:45533 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T20:47:28,144 DEBUG [RS:0;51ca66f7ee3c:45533 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/WALs/51ca66f7ee3c,45533,1731358047907 2024-11-11T20:47:28,144 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [51ca66f7ee3c,45533,1731358047907] 2024-11-11T20:47:28,147 INFO [RS:0;51ca66f7ee3c:45533 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-11T20:47:28,149 INFO [RS:0;51ca66f7ee3c:45533 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-11T20:47:28,149 INFO [RS:0;51ca66f7ee3c:45533 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T20:47:28,149 INFO [RS:0;51ca66f7ee3c:45533 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
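The limits logged above (FSHLogProvider WAL, 880 M / 836 M global memstore marks, 100 / 50 MB per second compaction throughput bounds) come from standard hbase-site.xml keys. A minimal sketch of that configuration follows; the key names are quoted from memory of hbase-default.xml and the values simply mirror the log, so treat both as assumptions rather than tuning advice. Note that 836 M is 0.95 of 880 M, which matches the default lower-limit fraction.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SiteConfigSketch {
      static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.wal.provider", "filesystem");                    // FSHLogProvider, as logged
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);  // upper mark (880 M here)
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f); // 836 M = 0.95 * 880 M
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        return conf;
      }
    }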
2024-11-11T20:47:28,153 INFO [RS:0;51ca66f7ee3c:45533 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-11T20:47:28,154 INFO [RS:0;51ca66f7ee3c:45533 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-11T20:47:28,154 INFO [RS:0;51ca66f7ee3c:45533 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-11T20:47:28,154 DEBUG [RS:0;51ca66f7ee3c:45533 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:47:28,154 DEBUG [RS:0;51ca66f7ee3c:45533 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:47:28,154 DEBUG [RS:0;51ca66f7ee3c:45533 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:47:28,154 DEBUG [RS:0;51ca66f7ee3c:45533 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:47:28,154 DEBUG [RS:0;51ca66f7ee3c:45533 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:47:28,154 DEBUG [RS:0;51ca66f7ee3c:45533 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/51ca66f7ee3c:0, corePoolSize=2, maxPoolSize=2 2024-11-11T20:47:28,154 DEBUG [RS:0;51ca66f7ee3c:45533 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:47:28,154 DEBUG [RS:0;51ca66f7ee3c:45533 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:47:28,154 DEBUG [RS:0;51ca66f7ee3c:45533 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:47:28,154 DEBUG [RS:0;51ca66f7ee3c:45533 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:47:28,154 DEBUG [RS:0;51ca66f7ee3c:45533 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:47:28,154 DEBUG [RS:0;51ca66f7ee3c:45533 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:47:28,154 DEBUG [RS:0;51ca66f7ee3c:45533 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/51ca66f7ee3c:0, corePoolSize=3, maxPoolSize=3 2024-11-11T20:47:28,154 DEBUG [RS:0;51ca66f7ee3c:45533 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/51ca66f7ee3c:0, corePoolSize=3, maxPoolSize=3 2024-11-11T20:47:28,156 INFO [RS:0;51ca66f7ee3c:45533 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
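Each "Chore ScheduledChore name=... is enabled." entry above and below comes from the ChoreService/ScheduledChore pattern: a named task scheduled at a fixed period. A rough sketch of that pattern follows; ChoreService and ScheduledChore are internal (private-audience) HBase classes, and the chore name, period, and body here are illustrative only.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreExample {
      public static void main(String[] args) {
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        // Runs every 1000 ms, like the CompactionChecker entry above.
        ScheduledChore chore = new ScheduledChore("ExampleChecker", stopper, 1000) {
          @Override protected void chore() {
            System.out.println("chore tick");   // periodic work goes here
          }
        };
        ChoreService service = new ChoreService("example");
        service.scheduleChore(chore);           // logs "... is enabled." as above
      }
    }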
2024-11-11T20:47:28,156 INFO [RS:0;51ca66f7ee3c:45533 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T20:47:28,156 INFO [RS:0;51ca66f7ee3c:45533 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T20:47:28,156 INFO [RS:0;51ca66f7ee3c:45533 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-11T20:47:28,156 INFO [RS:0;51ca66f7ee3c:45533 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T20:47:28,156 INFO [RS:0;51ca66f7ee3c:45533 {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,45533,1731358047907-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T20:47:28,171 INFO [RS:0;51ca66f7ee3c:45533 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T20:47:28,171 INFO [RS:0;51ca66f7ee3c:45533 {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,45533,1731358047907-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T20:47:28,172 INFO [RS:0;51ca66f7ee3c:45533 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T20:47:28,172 INFO [RS:0;51ca66f7ee3c:45533 {}] regionserver.Replication(171): 51ca66f7ee3c,45533,1731358047907 started 2024-11-11T20:47:28,186 INFO [RS:0;51ca66f7ee3c:45533 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T20:47:28,187 INFO [RS:0;51ca66f7ee3c:45533 {}] regionserver.HRegionServer(1482): Serving as 51ca66f7ee3c,45533,1731358047907, RpcServer on 51ca66f7ee3c/172.17.0.2:45533, sessionid=0x100308a9ad70001 2024-11-11T20:47:28,187 DEBUG [RS:0;51ca66f7ee3c:45533 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T20:47:28,187 DEBUG [RS:0;51ca66f7ee3c:45533 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 51ca66f7ee3c,45533,1731358047907 2024-11-11T20:47:28,187 DEBUG [RS:0;51ca66f7ee3c:45533 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '51ca66f7ee3c,45533,1731358047907' 2024-11-11T20:47:28,187 DEBUG [RS:0;51ca66f7ee3c:45533 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T20:47:28,187 DEBUG [RS:0;51ca66f7ee3c:45533 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T20:47:28,188 DEBUG [RS:0;51ca66f7ee3c:45533 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T20:47:28,188 DEBUG [RS:0;51ca66f7ee3c:45533 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T20:47:28,188 DEBUG [RS:0;51ca66f7ee3c:45533 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 51ca66f7ee3c,45533,1731358047907 2024-11-11T20:47:28,188 DEBUG [RS:0;51ca66f7ee3c:45533 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '51ca66f7ee3c,45533,1731358047907' 2024-11-11T20:47:28,188 DEBUG [RS:0;51ca66f7ee3c:45533 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-11T20:47:28,188 DEBUG 
[RS:0;51ca66f7ee3c:45533 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T20:47:28,188 DEBUG [RS:0;51ca66f7ee3c:45533 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T20:47:28,189 INFO [RS:0;51ca66f7ee3c:45533 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-11T20:47:28,189 INFO [RS:0;51ca66f7ee3c:45533 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-11T20:47:28,234 WARN [51ca66f7ee3c:36227 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-11T20:47:28,291 INFO [RS:0;51ca66f7ee3c:45533 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=51ca66f7ee3c%2C45533%2C1731358047907, suffix=, logDir=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/WALs/51ca66f7ee3c,45533,1731358047907, archiveDir=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/oldWALs, maxLogs=32 2024-11-11T20:47:28,291 INFO [RS:0;51ca66f7ee3c:45533 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 51ca66f7ee3c%2C45533%2C1731358047907.1731358048291 2024-11-11T20:47:28,296 INFO [RS:0;51ca66f7ee3c:45533 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/WALs/51ca66f7ee3c,45533,1731358047907/51ca66f7ee3c%2C45533%2C1731358047907.1731358048291 2024-11-11T20:47:28,297 DEBUG [RS:0;51ca66f7ee3c:45533 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35391:35391),(127.0.0.1/127.0.0.1:33773:33773)] 2024-11-11T20:47:28,484 DEBUG [51ca66f7ee3c:36227 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-11T20:47:28,485 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=51ca66f7ee3c,45533,1731358047907 2024-11-11T20:47:28,489 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 51ca66f7ee3c,45533,1731358047907, state=OPENING 2024-11-11T20:47:28,490 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-11T20:47:28,492 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45533-0x100308a9ad70001, quorum=127.0.0.1:63469, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:47:28,492 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36227-0x100308a9ad70000, quorum=127.0.0.1:63469, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:47:28,493 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T20:47:28,493 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T20:47:28,493 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T20:47:28,493 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=51ca66f7ee3c,45533,1731358047907}] 2024-11-11T20:47:28,648 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-11T20:47:28,652 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41319, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-11T20:47:28,658 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-11T20:47:28,659 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T20:47:28,661 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=51ca66f7ee3c%2C45533%2C1731358047907.meta, suffix=.meta, logDir=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/WALs/51ca66f7ee3c,45533,1731358047907, archiveDir=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/oldWALs, maxLogs=32 2024-11-11T20:47:28,661 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 51ca66f7ee3c%2C45533%2C1731358047907.meta.1731358048661.meta 2024-11-11T20:47:28,665 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/WALs/51ca66f7ee3c,45533,1731358047907/51ca66f7ee3c%2C45533%2C1731358047907.meta.1731358048661.meta 2024-11-11T20:47:28,667 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33773:33773),(127.0.0.1/127.0.0.1:35391:35391)] 2024-11-11T20:47:28,668 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-11T20:47:28,668 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-11T20:47:28,668 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-11T20:47:28,668 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
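The meta region is opened with the MultiRowMutationEndpoint coprocessor taken from its table descriptor ("Loaded ... from HTD of hbase:meta"). For an ordinary table the same wiring would look roughly like the sketch below; the table name "example" and column family "info" are placeholders.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CoprocessorTableDescriptor {
      static TableDescriptor build() throws Exception {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .build();
      }
    }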
2024-11-11T20:47:28,668 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-11T20:47:28,669 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T20:47:28,669 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-11T20:47:28,669 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-11T20:47:28,670 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T20:47:28,670 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T20:47:28,671 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:47:28,671 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:47:28,671 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T20:47:28,672 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T20:47:28,672 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:47:28,672 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:47:28,672 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T20:47:28,673 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T20:47:28,673 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:47:28,673 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:47:28,673 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T20:47:28,674 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T20:47:28,674 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:47:28,675 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
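The CompactionConfiguration dump repeated above (minCompactSize 128 MB, 3 to 10 files per compaction, ratio 1.2, off-peak ratio 5.0) reflects the usual compaction keys. A short sketch of where those numbers would be set follows; the values mirror the log and are not recommendations.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
      static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize
        conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        return conf;
      }
    }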
2024-11-11T20:47:28,675 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T20:47:28,675 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/hbase/meta/1588230740 2024-11-11T20:47:28,676 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/hbase/meta/1588230740 2024-11-11T20:47:28,678 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T20:47:28,678 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T20:47:28,678 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-11T20:47:28,679 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T20:47:28,680 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=760527, jitterRate=-0.03294004499912262}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T20:47:28,680 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-11T20:47:28,680 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731358048669Writing region info on filesystem at 1731358048669Initializing all the Stores at 1731358048669Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731358048670 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731358048670Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731358048670Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731358048670Cleaning up temporary data from old regions at 1731358048678 (+8 ms)Running coprocessor post-open hooks at 1731358048680 (+2 ms)Region opened successfully at 1731358048680 2024-11-11T20:47:28,681 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731358048648 2024-11-11T20:47:28,684 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-11T20:47:28,684 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-11T20:47:28,685 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=51ca66f7ee3c,45533,1731358047907 2024-11-11T20:47:28,685 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 51ca66f7ee3c,45533,1731358047907, state=OPEN 2024-11-11T20:47:28,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36227-0x100308a9ad70000, quorum=127.0.0.1:63469, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T20:47:28,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45533-0x100308a9ad70001, quorum=127.0.0.1:63469, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T20:47:28,688 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=51ca66f7ee3c,45533,1731358047907 2024-11-11T20:47:28,688 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T20:47:28,688 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T20:47:28,690 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-11T20:47:28,690 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=51ca66f7ee3c,45533,1731358047907 in 195 msec 2024-11-11T20:47:28,693 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-11T20:47:28,693 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 609 msec 2024-11-11T20:47:28,694 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T20:47:28,694 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-11T20:47:28,695 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T20:47:28,695 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=51ca66f7ee3c,45533,1731358047907, seqNum=-1] 2024-11-11T20:47:28,695 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T20:47:28,696 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57023, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T20:47:28,701 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 663 msec 2024-11-11T20:47:28,701 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731358048701, completionTime=-1 2024-11-11T20:47:28,702 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-11T20:47:28,702 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-11T20:47:28,703 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-11T20:47:28,703 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731358108703 2024-11-11T20:47:28,703 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731358168703 2024-11-11T20:47:28,703 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-11T20:47:28,704 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,36227,1731358047866-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T20:47:28,704 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,36227,1731358047866-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T20:47:28,704 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,36227,1731358047866-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T20:47:28,704 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-51ca66f7ee3c:36227, period=300000, unit=MILLISECONDS is enabled. 
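InitMetaProcedure above creates the 'default' and 'hbase' namespaces itself; the client-side equivalent, shown only to illustrate the Admin API, would look roughly like this (the helper name ensureNamespace is made up).

    import java.io.IOException;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;

    public class NamespaceBootstrap {
      // Creates a namespace if it does not exist yet; on a running cluster
      // 'default' and 'hbase' are always present already.
      static void ensureNamespace(Admin admin, String name) throws IOException {
        for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
          if (ns.getName().equals(name)) {
            return;
          }
        }
        admin.createNamespace(NamespaceDescriptor.create(name).build());
      }
    }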
2024-11-11T20:47:28,704 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-11T20:47:28,704 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-11T20:47:28,706 DEBUG [master/51ca66f7ee3c:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-11T20:47:28,708 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.773sec 2024-11-11T20:47:28,708 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-11T20:47:28,708 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-11T20:47:28,708 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-11T20:47:28,708 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-11T20:47:28,708 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-11T20:47:28,708 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,36227,1731358047866-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T20:47:28,709 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,36227,1731358047866-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-11T20:47:28,711 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-11T20:47:28,711 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-11T20:47:28,711 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,36227,1731358047866-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
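Quota support is reported disabled above. For reference only, turning it on is a single boolean switch read by the master and region servers at startup; the key below is, to the best of my knowledge, the one MasterQuotaManager checks.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class QuotaSwitch {
      // Takes effect only at process startup; it cannot be flipped per request.
      static Configuration withQuotasEnabled() {
        Configuration conf = HBaseConfiguration.create();
        conf.setBoolean("hbase.quota.enabled", true);
        return conf;
      }
    }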
2024-11-11T20:47:28,721 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5dea9c62, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T20:47:28,721 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 51ca66f7ee3c,36227,-1 for getting cluster id 2024-11-11T20:47:28,722 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-11T20:47:28,723 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7b5825e9-0807-4838-9f35-2e3ba6f3bfde' 2024-11-11T20:47:28,723 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-11T20:47:28,723 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7b5825e9-0807-4838-9f35-2e3ba6f3bfde" 2024-11-11T20:47:28,724 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@46f677bf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T20:47:28,724 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [51ca66f7ee3c,36227,-1] 2024-11-11T20:47:28,724 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-11T20:47:28,724 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T20:47:28,725 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59882, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-11T20:47:28,726 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@286b8c80, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T20:47:28,726 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T20:47:28,727 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=51ca66f7ee3c,45533,1731358047907, seqNum=-1] 2024-11-11T20:47:28,728 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T20:47:28,729 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34524, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T20:47:28,730 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=51ca66f7ee3c,36227,1731358047866 2024-11-11T20:47:28,731 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:47:28,733 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-11T20:47:28,733 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-11T20:47:28,734 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 51ca66f7ee3c,36227,1731358047866 2024-11-11T20:47:28,734 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@b2ab9d7 2024-11-11T20:47:28,734 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-11T20:47:28,735 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59892, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-11T20:47:28,736 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36227 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-11T20:47:28,736 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36227 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-11T20:47:28,736 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36227 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T20:47:28,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36227 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-11T20:47:28,739 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-11T20:47:28,739 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:47:28,739 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36227 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-11T20:47:28,740 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-11T20:47:28,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T20:47:28,749 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741835_1011 (size=381) 2024-11-11T20:47:28,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741835_1011 (size=381) 2024-11-11T20:47:28,751 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 31e28af4302d239e8d29f4d067b98d43, NAME => 'TestLogRolling-testLogRolling,,1731358048736.31e28af4302d239e8d29f4d067b98d43.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb 2024-11-11T20:47:28,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741836_1012 (size=64) 2024-11-11T20:47:28,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741836_1012 (size=64) 2024-11-11T20:47:28,757 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731358048736.31e28af4302d239e8d29f4d067b98d43.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T20:47:28,757 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 31e28af4302d239e8d29f4d067b98d43, disabling compactions & flushes 2024-11-11T20:47:28,757 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731358048736.31e28af4302d239e8d29f4d067b98d43. 2024-11-11T20:47:28,757 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731358048736.31e28af4302d239e8d29f4d067b98d43. 2024-11-11T20:47:28,757 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731358048736.31e28af4302d239e8d29f4d067b98d43. after waiting 0 ms 2024-11-11T20:47:28,757 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731358048736.31e28af4302d239e8d29f4d067b98d43. 2024-11-11T20:47:28,757 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731358048736.31e28af4302d239e8d29f4d067b98d43. 
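The create-table request above, including the deliberately tiny MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192) that trigger the two TableDescriptorChecker warnings, corresponds roughly to the client calls sketched below. Connection setup is omitted, the class name is invented, and admin is assumed to be an open Admin handle.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestLogRollingTable {
      static void create(Admin admin) throws Exception {
        admin.balancerSwitch(false, true);  // matches "set balanceSwitch=false" above
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
            .setMaxFileSize(786432L)        // tiny on purpose -> MAX_FILESIZE warning
            .setMemStoreFlushSize(8192L)    // tiny on purpose -> MEMSTORE_FLUSHSIZE warning
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(1)
                .build())
            .build();
        admin.createTable(td);              // "Stored pid=4 ... CreateTableProcedure" above
      }
    }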
2024-11-11T20:47:28,757 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 31e28af4302d239e8d29f4d067b98d43: Waiting for close lock at 1731358048757Disabling compacts and flushes for region at 1731358048757Disabling writes for close at 1731358048757Writing region close event to WAL at 1731358048757Closed at 1731358048757 2024-11-11T20:47:28,758 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-11T20:47:28,759 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1731358048736.31e28af4302d239e8d29f4d067b98d43.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731358048758"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731358048758"}]},"ts":"1731358048758"} 2024-11-11T20:47:28,761 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-11T20:47:28,762 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-11T20:47:28,762 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731358048762"}]},"ts":"1731358048762"} 2024-11-11T20:47:28,764 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-11T20:47:28,764 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=31e28af4302d239e8d29f4d067b98d43, ASSIGN}] 2024-11-11T20:47:28,765 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=31e28af4302d239e8d29f4d067b98d43, ASSIGN 2024-11-11T20:47:28,766 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=31e28af4302d239e8d29f4d067b98d43, ASSIGN; state=OFFLINE, location=51ca66f7ee3c,45533,1731358047907; forceNewPlan=false, retain=false 2024-11-11T20:47:28,918 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=31e28af4302d239e8d29f4d067b98d43, regionState=OPENING, regionLocation=51ca66f7ee3c,45533,1731358047907 2024-11-11T20:47:28,924 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=31e28af4302d239e8d29f4d067b98d43, ASSIGN because future has completed 2024-11-11T20:47:28,924 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
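Editor's note: the master log above records the full CreateTable request: a single 'info' family with VERSIONS => '1', a ROW bloom filter, 64 KB blocks, and deliberately tiny MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192), which is exactly what the two TableDescriptorChecker warnings flag. As a sketch only (not taken from the test source, and assuming the standard HBase 2.x+ client API), a client-side request equivalent to that descriptor could look like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestLogRollingTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptor td = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
              .setMaxFileSize(786432L)       // MAX_FILESIZE from the log; small enough to trip the checker warning
              .setMemStoreFlushSize(8192L)   // MEMSTORE_FLUSHSIZE from the log; likewise warned about
              .setColumnFamily(ColumnFamilyDescriptorBuilder
                  .newBuilder(Bytes.toBytes("info"))
                  .setMaxVersions(1)                 // VERSIONS => '1'
                  .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
                  .setBlocksize(65536)               // BLOCKSIZE => '65536 B (64KB)'
                  .build())
              .build();
          admin.createTable(td); // arrives at the master as the CreateTableProcedure logged above (pid=4)
        }
      }
    }

The limits are presumably set this low so the test can force flushes, log rolls, and splits quickly; the checker warnings are expected noise for this table rather than a misconfiguration.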
2024-11-11T20:47:28,924 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-11T20:47:28,924 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
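Editor's note: the two warnings above (and the many identical ones that follow, roughly once per second per WAL file) all fail the same way: RecoverLeaseFSUtils reflectively calls DFSClient.isFileClosed() on a filesystem whose client has already been shut down, so every attempt ends in IOException: Filesystem closed. A rough sketch of that recover-then-poll pattern is below; it illustrates the general retry loop against the public HDFS API, it is not the actual RecoverLeaseFSUtils code, and the timeout value is invented for the example:

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoveryRetry {
      // Polls recoverLease()/isFileClosed() until the WAL is safe to read, or a deadline passes.
      public static boolean recover(FileSystem fs, Path wal, long timeoutMs) throws InterruptedException {
        if (!(fs instanceof DistributedFileSystem)) {
          return true; // lease recovery only matters on HDFS
        }
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (System.currentTimeMillis() < deadline) {
          try {
            // recoverLease() returns true once the NameNode has closed the file;
            // isFileClosed() is the cheaper follow-up check between attempts.
            if (dfs.recoverLease(wal) || dfs.isFileClosed(wal)) {
              return true;
            }
          } catch (IOException e) {
            // With an already-closed DFSClient this is "java.io.IOException: Filesystem closed"
            // on every attempt, which is what produces the repeated WARNs in this log.
            System.err.println("Failed invocation for " + wal + ": " + e);
          }
          Thread.sleep(1000L); // matches the roughly one-second retry cadence seen above
        }
        return false;
      }
    }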
2024-11-11T20:47:28,924 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 31e28af4302d239e8d29f4d067b98d43, server=51ca66f7ee3c,45533,1731358047907}]
2024-11-11T20:47:29,084 INFO [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731358048736.31e28af4302d239e8d29f4d067b98d43.
2024-11-11T20:47:29,084 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 31e28af4302d239e8d29f4d067b98d43, NAME => 'TestLogRolling-testLogRolling,,1731358048736.31e28af4302d239e8d29f4d067b98d43.', STARTKEY => '', ENDKEY => ''}
2024-11-11T20:47:29,085 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 31e28af4302d239e8d29f4d067b98d43
2024-11-11T20:47:29,085 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731358048736.31e28af4302d239e8d29f4d067b98d43.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-11T20:47:29,085 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 31e28af4302d239e8d29f4d067b98d43
2024-11-11T20:47:29,085 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 31e28af4302d239e8d29f4d067b98d43
2024-11-11T20:47:29,088 INFO [StoreOpener-31e28af4302d239e8d29f4d067b98d43-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 31e28af4302d239e8d29f4d067b98d43
2024-11-11T20:47:29,090 INFO [StoreOpener-31e28af4302d239e8d29f4d067b98d43-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 31e28af4302d239e8d29f4d067b98d43 columnFamilyName info
2024-11-11T20:47:29,090 DEBUG [StoreOpener-31e28af4302d239e8d29f4d067b98d43-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-11T20:47:29,091 INFO [StoreOpener-31e28af4302d239e8d29f4d067b98d43-1 {}] regionserver.HStore(327): Store=31e28af4302d239e8d29f4d067b98d43/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-11T20:47:29,091 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 31e28af4302d239e8d29f4d067b98d43
2024-11-11T20:47:29,092 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43
2024-11-11T20:47:29,093 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43
2024-11-11T20:47:29,094 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 31e28af4302d239e8d29f4d067b98d43
2024-11-11T20:47:29,094 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 31e28af4302d239e8d29f4d067b98d43
2024-11-11T20:47:29,096 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 31e28af4302d239e8d29f4d067b98d43
2024-11-11T20:47:29,099 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-11-11T20:47:29,099 INFO [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 31e28af4302d239e8d29f4d067b98d43; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=757127, jitterRate=-0.037263572216033936}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-11-11T20:47:29,099 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 31e28af4302d239e8d29f4d067b98d43
2024-11-11T20:47:29,100 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 31e28af4302d239e8d29f4d067b98d43: Running coprocessor pre-open hook at 1731358049085Writing region info on filesystem at 1731358049085Initializing all the Stores at 1731358049087 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731358049087Cleaning up temporary data from old regions at 1731358049094 (+7 ms)Running coprocessor post-open hooks at 1731358049099 (+5 ms)Region opened successfully at 1731358049100 (+1 ms)
2024-11-11T20:47:29,101 INFO [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731358048736.31e28af4302d239e8d29f4d067b98d43., pid=6, masterSystemTime=1731358049078
2024-11-11T20:47:29,103 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731358048736.31e28af4302d239e8d29f4d067b98d43.
2024-11-11T20:47:29,103 INFO [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731358048736.31e28af4302d239e8d29f4d067b98d43.
2024-11-11T20:47:29,104 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=31e28af4302d239e8d29f4d067b98d43, regionState=OPEN, openSeqNum=2, regionLocation=51ca66f7ee3c,45533,1731358047907
2024-11-11T20:47:29,105 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 31e28af4302d239e8d29f4d067b98d43, server=51ca66f7ee3c,45533,1731358047907 because future has completed
2024-11-11T20:47:29,109 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5
2024-11-11T20:47:29,109 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 31e28af4302d239e8d29f4d067b98d43, server=51ca66f7ee3c,45533,1731358047907 in 182 msec
2024-11-11T20:47:29,112 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4
2024-11-11T20:47:29,112 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=31e28af4302d239e8d29f4d067b98d43, ASSIGN in 345 msec
2024-11-11T20:47:29,113 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-11-11T20:47:29,113 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731358049113"}]},"ts":"1731358049113"}
2024-11-11T20:47:29,116 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta
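Editor's note: the region-open record above reports ConstantSizeRegionSplitPolicy{desiredMaxFileSize=757127, jitterRate=-0.037263572216033936}. Those numbers are consistent with the configured hbase.hregion.max.filesize of 786432 (the value flagged by the earlier MAX_FILESIZE warning) scaled by (1 + jitterRate). A quick check of that relationship, which is an editor's assumption about how the jitter is applied rather than a quote of the policy code:

    public class SplitSizeJitterCheck {
      public static void main(String[] args) {
        long maxFileSize = 786432L;                 // "hbase.hregion.max.filesize" from the log
        double jitterRate = -0.037263572216033936;  // jitterRate printed by ConstantSizeRegionSplitPolicy
        long desired = maxFileSize + (long) (maxFileSize * jitterRate);
        System.out.println(desired);                // prints 757127, matching desiredMaxFileSize above
      }
    }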
2024-11-11T20:47:29,118 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION
2024-11-11T20:47:29,120 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 382 msec
2024-11-11T20:47:29,925 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051
2024-11-11T20:47:29,925 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta
2024-11-11T20:47:30,925 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta
2024-11-11T20:47:30,925 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051
2024-11-11T20:47:31,784 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T20:47:31,785 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T20:47:31,785 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T20:47:31,785 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T20:47:31,785 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T20:47:31,785 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T20:47:31,786 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T20:47:31,786 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T20:47:31,806 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T20:47:31,806 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T20:47:31,806 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T20:47:31,806 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T20:47:31,807 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T20:47:31,807 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T20:47:31,811 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T20:47:31,811 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T20:47:31,812 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T20:47:31,814 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T20:47:31,926 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051
2024-11-11T20:47:31,926 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta
2024-11-11T20:47:32,318 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-11-11T20:47:32,319 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T20:47:32,320 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T20:47:32,320 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T20:47:32,320 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T20:47:32,320 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T20:47:32,320 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T20:47:32,321 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T20:47:32,321 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T20:47:32,342 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T20:47:32,342 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T20:47:32,343 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T20:47:32,343 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T20:47:32,343 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T20:47:32,343 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T20:47:32,349 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T20:47:32,350 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T20:47:32,350 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T20:47:32,353 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T20:47:32,927 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051
2024-11-11T20:47:32,927 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta
2024-11-11T20:47:33,927 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051
2024-11-11T20:47:33,927 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta
2024-11-11T20:47:34,147 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-11-11T20:47:34,148 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling'
2024-11-11T20:47:34,928 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051
2024-11-11T20:47:34,928 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta
2024-11-11T20:47:35,929 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051
2024-11-11T20:47:35,929 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta
2024-11-11T20:47:36,830 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling
2024-11-11T20:47:36,831 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer
2024-11-11T20:47:36,930 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051
2024-11-11T20:47:36,930 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta
2024-11-11T20:47:37,932 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:47:37,932 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-11T20:47:38,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-11T20:47:38,830 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed
2024-11-11T20:47:38,830 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100
2024-11-11T20:47:38,835 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling
2024-11-11T20:47:38,835 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1731358048736.31e28af4302d239e8d29f4d067b98d43.
2024-11-11T20:47:38,839 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1731358048736.31e28af4302d239e8d29f4d067b98d43., hostname=51ca66f7ee3c,45533,1731358047907, seqNum=2]
2024-11-11T20:47:38,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45533 {}] regionserver.HRegion(8855): Flush requested on 31e28af4302d239e8d29f4d067b98d43
2024-11-11T20:47:38,857 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 31e28af4302d239e8d29f4d067b98d43 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-11T20:47:38,877 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/.tmp/info/e8f259d5fc3d45cc84787332764d1e58 is 1080, key is row0001/info:/1731358058840/Put/seqid=0
2024-11-11T20:47:38,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741837_1013 (size=12509)
2024-11-11T20:47:38,883 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/.tmp/info/e8f259d5fc3d45cc84787332764d1e58
2024-11-11T20:47:38,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741837_1013 (size=12509)
2024-11-11T20:47:38,890 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/.tmp/info/e8f259d5fc3d45cc84787332764d1e58 as hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/e8f259d5fc3d45cc84787332764d1e58
2024-11-11T20:47:38,896 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/e8f259d5fc3d45cc84787332764d1e58, entries=7, sequenceid=11, filesize=12.2 K
2024-11-11T20:47:38,897 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=18.91 KB/19368 for 31e28af4302d239e8d29f4d067b98d43 in 40ms, sequenceid=11, compaction requested=false
2024-11-11T20:47:38,897 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 31e28af4302d239e8d29f4d067b98d43:
2024-11-11T20:47:38,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45533 {}] regionserver.HRegion(8855): Flush requested on 31e28af4302d239e8d29f4d067b98d43
2024-11-11T20:47:38,898 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 31e28af4302d239e8d29f4d067b98d43 1/1 column families, dataSize=19.96 KB heapSize=21.63 KB
2024-11-11T20:47:38,903 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/.tmp/info/7a65a1ba311b4f128f3953ae4eb32803 is 1080, key is row0008/info:/1731358058859/Put/seqid=0
2024-11-11T20:47:38,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741838_1014 (size=25453)
2024-11-11T20:47:38,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741838_1014 (size=25453)
2024-11-11T20:47:38,909 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=19.96 KB at sequenceid=33 (bloomFilter=true), to=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/.tmp/info/7a65a1ba311b4f128f3953ae4eb32803
2024-11-11T20:47:38,914 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/.tmp/info/7a65a1ba311b4f128f3953ae4eb32803 as hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/7a65a1ba311b4f128f3953ae4eb32803
2024-11-11T20:47:38,919 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/7a65a1ba311b4f128f3953ae4eb32803, entries=19, sequenceid=33, filesize=24.9 K
2024-11-11T20:47:38,920 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~19.96 KB/20444, heapSize ~21.61 KB/22128, currentSize=6.30 KB/6456 for 31e28af4302d239e8d29f4d067b98d43 in 22ms, sequenceid=33, compaction requested=false
2024-11-11T20:47:38,920 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 31e28af4302d239e8d29f4d067b98d43:
2024-11-11T20:47:38,920 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=37.1 K, sizeToCheck=16.0 K
2024-11-11T20:47:38,920 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-11T20:47:38,920 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split
hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/7a65a1ba311b4f128f3953ae4eb32803 because midkey is the same as first or last row 2024-11-11T20:47:38,933 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:47:38,933 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:47:39,934 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:47:39,934 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T20:47:40,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45533 {}] regionserver.HRegion(8855): Flush requested on 31e28af4302d239e8d29f4d067b98d43 2024-11-11T20:47:40,915 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 31e28af4302d239e8d29f4d067b98d43 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-11T20:47:40,920 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/.tmp/info/1b90f77d77334d21ab546e53915af6a9 is 1080, key is row0027/info:/1731358058899/Put/seqid=0 2024-11-11T20:47:40,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741839_1015 (size=12509) 2024-11-11T20:47:40,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741839_1015 (size=12509) 2024-11-11T20:47:40,927 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=43 (bloomFilter=true), to=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/.tmp/info/1b90f77d77334d21ab546e53915af6a9 2024-11-11T20:47:40,934 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/.tmp/info/1b90f77d77334d21ab546e53915af6a9 as hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/1b90f77d77334d21ab546e53915af6a9 2024-11-11T20:47:40,935 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:47:40,935 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T20:47:40,940 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/1b90f77d77334d21ab546e53915af6a9, entries=7, sequenceid=43, filesize=12.2 K 2024-11-11T20:47:40,941 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for 31e28af4302d239e8d29f4d067b98d43 in 26ms, sequenceid=43, compaction requested=true 2024-11-11T20:47:40,941 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 31e28af4302d239e8d29f4d067b98d43: 2024-11-11T20:47:40,941 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=49.3 K, sizeToCheck=16.0 K 2024-11-11T20:47:40,941 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T20:47:40,941 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/7a65a1ba311b4f128f3953ae4eb32803 because midkey is the same as first or last row 2024-11-11T20:47:40,941 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 31e28af4302d239e8d29f4d067b98d43:info, priority=-2147483648, current under compaction store size is 1 2024-11-11T20:47:40,941 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T20:47:40,941 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T20:47:40,943 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 50471 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T20:47:40,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45533 {}] regionserver.HRegion(8855): Flush requested on 31e28af4302d239e8d29f4d067b98d43 2024-11-11T20:47:40,943 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HStore(1541): 31e28af4302d239e8d29f4d067b98d43/info is initiating minor compaction (all files) 2024-11-11T20:47:40,943 INFO [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 31e28af4302d239e8d29f4d067b98d43/info in TestLogRolling-testLogRolling,,1731358048736.31e28af4302d239e8d29f4d067b98d43. 
2024-11-11T20:47:40,943 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 31e28af4302d239e8d29f4d067b98d43 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-11T20:47:40,943 INFO [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/e8f259d5fc3d45cc84787332764d1e58, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/7a65a1ba311b4f128f3953ae4eb32803, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/1b90f77d77334d21ab546e53915af6a9] into tmpdir=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/.tmp, totalSize=49.3 K 2024-11-11T20:47:40,943 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] compactions.Compactor(225): Compacting e8f259d5fc3d45cc84787332764d1e58, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731358058840 2024-11-11T20:47:40,944 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7a65a1ba311b4f128f3953ae4eb32803, keycount=19, bloomtype=ROW, size=24.9 K, encoding=NONE, compression=NONE, seqNum=33, earliestPutTs=1731358058859 2024-11-11T20:47:40,944 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1b90f77d77334d21ab546e53915af6a9, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1731358058899 2024-11-11T20:47:40,947 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/.tmp/info/50e6b7e5198548148ca743fd66e28a90 is 1080, key is row0034/info:/1731358060916/Put/seqid=0 2024-11-11T20:47:40,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741840_1016 (size=17894) 2024-11-11T20:47:40,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741840_1016 (size=17894) 2024-11-11T20:47:40,955 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=58 (bloomFilter=true), to=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/.tmp/info/50e6b7e5198548148ca743fd66e28a90 2024-11-11T20:47:40,960 INFO [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 31e28af4302d239e8d29f4d067b98d43#info#compaction#58 average throughput is 16.93 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T20:47:40,960 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/.tmp/info/88ebb6e9c633467a8cf83e6e78bb99b2 is 1080, key is row0001/info:/1731358058840/Put/seqid=0 2024-11-11T20:47:40,962 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/.tmp/info/50e6b7e5198548148ca743fd66e28a90 as hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/50e6b7e5198548148ca743fd66e28a90 2024-11-11T20:47:40,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741841_1017 (size=40670) 2024-11-11T20:47:40,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741841_1017 (size=40670) 2024-11-11T20:47:40,969 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/50e6b7e5198548148ca743fd66e28a90, entries=12, sequenceid=58, filesize=17.5 K 2024-11-11T20:47:40,970 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=10.51 KB/10760 for 31e28af4302d239e8d29f4d067b98d43 in 27ms, sequenceid=58, compaction requested=false 2024-11-11T20:47:40,970 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 31e28af4302d239e8d29f4d067b98d43: 2024-11-11T20:47:40,970 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=66.8 K, sizeToCheck=16.0 K 2024-11-11T20:47:40,970 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T20:47:40,970 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/7a65a1ba311b4f128f3953ae4eb32803 because midkey is the same as first or last row 2024-11-11T20:47:40,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45533 {}] regionserver.HRegion(8855): Flush requested on 31e28af4302d239e8d29f4d067b98d43 2024-11-11T20:47:40,972 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 31e28af4302d239e8d29f4d067b98d43 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-11T20:47:40,975 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/.tmp/info/88ebb6e9c633467a8cf83e6e78bb99b2 as 
hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/88ebb6e9c633467a8cf83e6e78bb99b2 2024-11-11T20:47:40,977 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/.tmp/info/d3629cc16f074023ae373191cb3fe681 is 1080, key is row0046/info:/1731358060944/Put/seqid=0 2024-11-11T20:47:40,982 INFO [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 31e28af4302d239e8d29f4d067b98d43/info of 31e28af4302d239e8d29f4d067b98d43 into 88ebb6e9c633467a8cf83e6e78bb99b2(size=39.7 K), total size for store is 57.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T20:47:40,982 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 31e28af4302d239e8d29f4d067b98d43: 2024-11-11T20:47:40,982 INFO [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731358048736.31e28af4302d239e8d29f4d067b98d43., storeName=31e28af4302d239e8d29f4d067b98d43/info, priority=13, startTime=1731358060941; duration=0sec 2024-11-11T20:47:40,982 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=57.2 K, sizeToCheck=16.0 K 2024-11-11T20:47:40,982 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T20:47:40,982 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/88ebb6e9c633467a8cf83e6e78bb99b2 because midkey is the same as first or last row 2024-11-11T20:47:40,982 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=57.2 K, sizeToCheck=16.0 K 2024-11-11T20:47:40,982 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T20:47:40,982 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/88ebb6e9c633467a8cf83e6e78bb99b2 because midkey is the same as first or last row 2024-11-11T20:47:40,982 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=57.2 K, sizeToCheck=16.0 K 2024-11-11T20:47:40,982 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T20:47:40,983 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/88ebb6e9c633467a8cf83e6e78bb99b2 because midkey is the same as first or last row 2024-11-11T20:47:40,983 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T20:47:40,983 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 31e28af4302d239e8d29f4d067b98d43:info 2024-11-11T20:47:40,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741842_1018 (size=17894) 2024-11-11T20:47:40,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741842_1018 (size=17894) 2024-11-11T20:47:41,387 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=73 (bloomFilter=true), to=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/.tmp/info/d3629cc16f074023ae373191cb3fe681 2024-11-11T20:47:41,399 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/.tmp/info/d3629cc16f074023ae373191cb3fe681 as hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/d3629cc16f074023ae373191cb3fe681 2024-11-11T20:47:41,406 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/d3629cc16f074023ae373191cb3fe681, entries=12, sequenceid=73, filesize=17.5 K 2024-11-11T20:47:41,407 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=7.36 KB/7532 for 31e28af4302d239e8d29f4d067b98d43 in 435ms, sequenceid=73, compaction requested=true 2024-11-11T20:47:41,407 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 31e28af4302d239e8d29f4d067b98d43: 2024-11-11T20:47:41,407 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=74.7 K, sizeToCheck=16.0 K 2024-11-11T20:47:41,407 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T20:47:41,407 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/88ebb6e9c633467a8cf83e6e78bb99b2 because midkey is the same as first or last row 2024-11-11T20:47:41,407 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 31e28af4302d239e8d29f4d067b98d43:info, priority=-2147483648, current under compaction store size is 1 2024-11-11T20:47:41,407 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T20:47:41,407 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T20:47:41,409 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 76458 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T20:47:41,409 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HStore(1541): 31e28af4302d239e8d29f4d067b98d43/info is initiating minor compaction (all files) 2024-11-11T20:47:41,409 INFO [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 31e28af4302d239e8d29f4d067b98d43/info in TestLogRolling-testLogRolling,,1731358048736.31e28af4302d239e8d29f4d067b98d43. 2024-11-11T20:47:41,409 INFO [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/88ebb6e9c633467a8cf83e6e78bb99b2, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/50e6b7e5198548148ca743fd66e28a90, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/d3629cc16f074023ae373191cb3fe681] into tmpdir=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/.tmp, totalSize=74.7 K 2024-11-11T20:47:41,410 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] compactions.Compactor(225): Compacting 88ebb6e9c633467a8cf83e6e78bb99b2, keycount=33, bloomtype=ROW, size=39.7 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1731358058840 2024-11-11T20:47:41,410 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] compactions.Compactor(225): Compacting 50e6b7e5198548148ca743fd66e28a90, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=58, earliestPutTs=1731358060916 2024-11-11T20:47:41,410 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] compactions.Compactor(225): Compacting d3629cc16f074023ae373191cb3fe681, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=73, earliestPutTs=1731358060944 2024-11-11T20:47:41,425 INFO [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 31e28af4302d239e8d29f4d067b98d43#info#compaction#60 average throughput is 19.50 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T20:47:41,426 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/.tmp/info/35f83c534fa340c182b6e62cf35f33f6 is 1080, key is row0001/info:/1731358058840/Put/seqid=0 2024-11-11T20:47:41,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741843_1019 (size=66689) 2024-11-11T20:47:41,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741843_1019 (size=66689) 2024-11-11T20:47:41,436 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/.tmp/info/35f83c534fa340c182b6e62cf35f33f6 as hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/35f83c534fa340c182b6e62cf35f33f6 2024-11-11T20:47:41,441 INFO [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 31e28af4302d239e8d29f4d067b98d43/info of 31e28af4302d239e8d29f4d067b98d43 into 35f83c534fa340c182b6e62cf35f33f6(size=65.1 K), total size for store is 65.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T20:47:41,441 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 31e28af4302d239e8d29f4d067b98d43: 2024-11-11T20:47:41,441 INFO [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731358048736.31e28af4302d239e8d29f4d067b98d43., storeName=31e28af4302d239e8d29f4d067b98d43/info, priority=13, startTime=1731358061407; duration=0sec 2024-11-11T20:47:41,441 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=65.1 K, sizeToCheck=16.0 K 2024-11-11T20:47:41,441 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T20:47:41,441 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/35f83c534fa340c182b6e62cf35f33f6 because midkey is the same as first or last row 2024-11-11T20:47:41,441 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=65.1 K, sizeToCheck=16.0 K 2024-11-11T20:47:41,441 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T20:47:41,441 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/35f83c534fa340c182b6e62cf35f33f6 because midkey is the same as first or last row 2024-11-11T20:47:41,441 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=65.1 K, sizeToCheck=16.0 K 2024-11-11T20:47:41,441 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T20:47:41,441 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/35f83c534fa340c182b6e62cf35f33f6 because midkey is the same as first or last row 2024-11-11T20:47:41,441 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T20:47:41,442 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 31e28af4302d239e8d29f4d067b98d43:info 2024-11-11T20:47:41,935 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:47:41,935 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:47:42,936 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:47:42,936 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:47:42,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45533 {}] regionserver.HRegion(8855): Flush requested on 31e28af4302d239e8d29f4d067b98d43 2024-11-11T20:47:42,996 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 31e28af4302d239e8d29f4d067b98d43 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB 2024-11-11T20:47:43,002 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/.tmp/info/213af03e37a74ec0a1b23b068b8f496e is 1080, key is row0058/info:/1731358060974/Put/seqid=0 2024-11-11T20:47:43,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741844_1020 (size=13586) 2024-11-11T20:47:43,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741844_1020 (size=13586) 2024-11-11T20:47:43,007 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=86 (bloomFilter=true), to=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/.tmp/info/213af03e37a74ec0a1b23b068b8f496e 2024-11-11T20:47:43,013 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/.tmp/info/213af03e37a74ec0a1b23b068b8f496e as hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/213af03e37a74ec0a1b23b068b8f496e 2024-11-11T20:47:43,018 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/213af03e37a74ec0a1b23b068b8f496e, entries=8, sequenceid=86, filesize=13.3 K 2024-11-11T20:47:43,019 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=10.51 KB/10760 for 31e28af4302d239e8d29f4d067b98d43 in 24ms, sequenceid=86, compaction requested=false 2024-11-11T20:47:43,019 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 31e28af4302d239e8d29f4d067b98d43: 2024-11-11T20:47:43,019 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=78.4 K, sizeToCheck=16.0 K 2024-11-11T20:47:43,019 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T20:47:43,019 DEBUG 
[MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/35f83c534fa340c182b6e62cf35f33f6 because midkey is the same as first or last row 2024-11-11T20:47:43,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45533 {}] regionserver.HRegion(8855): Flush requested on 31e28af4302d239e8d29f4d067b98d43 2024-11-11T20:47:43,021 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 31e28af4302d239e8d29f4d067b98d43 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-11T20:47:43,024 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/.tmp/info/883b6cb4dc7f4b12a2158fb4da3aabdd is 1080, key is row0066/info:/1731358062999/Put/seqid=0 2024-11-11T20:47:43,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741845_1021 (size=16817) 2024-11-11T20:47:43,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741845_1021 (size=16817) 2024-11-11T20:47:43,031 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=100 (bloomFilter=true), to=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/.tmp/info/883b6cb4dc7f4b12a2158fb4da3aabdd 2024-11-11T20:47:43,037 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/.tmp/info/883b6cb4dc7f4b12a2158fb4da3aabdd as hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/883b6cb4dc7f4b12a2158fb4da3aabdd 2024-11-11T20:47:43,043 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/883b6cb4dc7f4b12a2158fb4da3aabdd, entries=11, sequenceid=100, filesize=16.4 K 2024-11-11T20:47:43,044 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=11.56 KB/11836 for 31e28af4302d239e8d29f4d067b98d43 in 23ms, sequenceid=100, compaction requested=true 2024-11-11T20:47:43,044 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 31e28af4302d239e8d29f4d067b98d43: 2024-11-11T20:47:43,044 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=94.8 K, sizeToCheck=16.0 K 2024-11-11T20:47:43,044 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T20:47:43,044 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/35f83c534fa340c182b6e62cf35f33f6 because midkey is the same as first or last row 2024-11-11T20:47:43,044 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 31e28af4302d239e8d29f4d067b98d43:info, priority=-2147483648, current under compaction store size is 1 2024-11-11T20:47:43,044 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T20:47:43,044 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T20:47:43,045 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 97092 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T20:47:43,045 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HStore(1541): 31e28af4302d239e8d29f4d067b98d43/info is initiating minor compaction (all files) 2024-11-11T20:47:43,045 INFO [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 31e28af4302d239e8d29f4d067b98d43/info in TestLogRolling-testLogRolling,,1731358048736.31e28af4302d239e8d29f4d067b98d43. 2024-11-11T20:47:43,045 INFO [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/35f83c534fa340c182b6e62cf35f33f6, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/213af03e37a74ec0a1b23b068b8f496e, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/883b6cb4dc7f4b12a2158fb4da3aabdd] into tmpdir=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/.tmp, totalSize=94.8 K 2024-11-11T20:47:43,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45533 {}] regionserver.HRegion(8855): Flush requested on 31e28af4302d239e8d29f4d067b98d43 2024-11-11T20:47:43,046 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 31e28af4302d239e8d29f4d067b98d43 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-11T20:47:43,046 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] compactions.Compactor(225): Compacting 35f83c534fa340c182b6e62cf35f33f6, keycount=57, bloomtype=ROW, size=65.1 K, encoding=NONE, compression=NONE, seqNum=73, earliestPutTs=1731358058840 2024-11-11T20:47:43,046 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] compactions.Compactor(225): Compacting 213af03e37a74ec0a1b23b068b8f496e, keycount=8, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=86, earliestPutTs=1731358060974 2024-11-11T20:47:43,046 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] compactions.Compactor(225): Compacting 883b6cb4dc7f4b12a2158fb4da3aabdd, keycount=11, bloomtype=ROW, size=16.4 
K, encoding=NONE, compression=NONE, seqNum=100, earliestPutTs=1731358062999 2024-11-11T20:47:43,049 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/.tmp/info/ab7f0a443e19411a9237bb375e5c07e3 is 1080, key is row0077/info:/1731358063022/Put/seqid=0 2024-11-11T20:47:43,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741846_1022 (size=17894) 2024-11-11T20:47:43,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741846_1022 (size=17894) 2024-11-11T20:47:43,055 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/.tmp/info/ab7f0a443e19411a9237bb375e5c07e3 2024-11-11T20:47:43,059 INFO [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 31e28af4302d239e8d29f4d067b98d43#info#compaction#64 average throughput is 38.99 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T20:47:43,060 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/.tmp/info/60d2284b6f554f8eb691c71dd5237733 is 1080, key is row0001/info:/1731358058840/Put/seqid=0 2024-11-11T20:47:43,063 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/.tmp/info/ab7f0a443e19411a9237bb375e5c07e3 as hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/ab7f0a443e19411a9237bb375e5c07e3 2024-11-11T20:47:43,067 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/ab7f0a443e19411a9237bb375e5c07e3, entries=12, sequenceid=115, filesize=17.5 K 2024-11-11T20:47:43,068 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=8.41 KB/8608 for 31e28af4302d239e8d29f4d067b98d43 in 22ms, sequenceid=115, compaction requested=false 2024-11-11T20:47:43,068 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 31e28af4302d239e8d29f4d067b98d43: 2024-11-11T20:47:43,068 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=112.3 K, sizeToCheck=16.0 K 2024-11-11T20:47:43,068 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T20:47:43,069 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/35f83c534fa340c182b6e62cf35f33f6 because midkey is the same as first or last row 2024-11-11T20:47:43,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741847_1023 (size=87327) 2024-11-11T20:47:43,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741847_1023 (size=87327) 2024-11-11T20:47:43,076 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/.tmp/info/60d2284b6f554f8eb691c71dd5237733 as hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/60d2284b6f554f8eb691c71dd5237733 2024-11-11T20:47:43,082 INFO [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 31e28af4302d239e8d29f4d067b98d43/info of 31e28af4302d239e8d29f4d067b98d43 into 60d2284b6f554f8eb691c71dd5237733(size=85.3 K), total size for store is 102.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T20:47:43,082 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 31e28af4302d239e8d29f4d067b98d43: 2024-11-11T20:47:43,082 INFO [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731358048736.31e28af4302d239e8d29f4d067b98d43., storeName=31e28af4302d239e8d29f4d067b98d43/info, priority=13, startTime=1731358063044; duration=0sec 2024-11-11T20:47:43,082 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=102.8 K, sizeToCheck=16.0 K 2024-11-11T20:47:43,082 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T20:47:43,082 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=102.8 K, sizeToCheck=16.0 K 2024-11-11T20:47:43,082 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T20:47:43,082 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=102.8 K, sizeToCheck=16.0 K 2024-11-11T20:47:43,082 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T20:47:43,083 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1731358048736.31e28af4302d239e8d29f4d067b98d43., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T20:47:43,083 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T20:47:43,083 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 31e28af4302d239e8d29f4d067b98d43:info 2024-11-11T20:47:43,084 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36227 {}] assignment.AssignmentManager(1363): Split request from 51ca66f7ee3c,45533,1731358047907, parent={ENCODED => 31e28af4302d239e8d29f4d067b98d43, NAME => 'TestLogRolling-testLogRolling,,1731358048736.31e28af4302d239e8d29f4d067b98d43.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-11T20:47:43,089 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36227 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=51ca66f7ee3c,45533,1731358047907 2024-11-11T20:47:43,093 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36227 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=31e28af4302d239e8d29f4d067b98d43, daughterA=17b383949ba3671cbe9bcb0fc4722887, daughterB=e2da7cb8257b39902dcb50ef5e5e3cc7 2024-11-11T20:47:43,095 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=31e28af4302d239e8d29f4d067b98d43, daughterA=17b383949ba3671cbe9bcb0fc4722887, daughterB=e2da7cb8257b39902dcb50ef5e5e3cc7 2024-11-11T20:47:43,095 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=31e28af4302d239e8d29f4d067b98d43, daughterA=17b383949ba3671cbe9bcb0fc4722887, daughterB=e2da7cb8257b39902dcb50ef5e5e3cc7 2024-11-11T20:47:43,095 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=31e28af4302d239e8d29f4d067b98d43, daughterA=17b383949ba3671cbe9bcb0fc4722887, daughterB=e2da7cb8257b39902dcb50ef5e5e3cc7 2024-11-11T20:47:43,101 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=31e28af4302d239e8d29f4d067b98d43, UNASSIGN}] 2024-11-11T20:47:43,103 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=31e28af4302d239e8d29f4d067b98d43, UNASSIGN 2024-11-11T20:47:43,104 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=31e28af4302d239e8d29f4d067b98d43, regionState=CLOSING, regionLocation=51ca66f7ee3c,45533,1731358047907 2024-11-11T20:47:43,107 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=31e28af4302d239e8d29f4d067b98d43, UNASSIGN because future has completed 2024-11-11T20:47:43,107 DEBUG [PEWorker-4 {}] 
assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-11T20:47:43,107 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 31e28af4302d239e8d29f4d067b98d43, server=51ca66f7ee3c,45533,1731358047907}] 2024-11-11T20:47:43,267 INFO [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close 31e28af4302d239e8d29f4d067b98d43 2024-11-11T20:47:43,267 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-11T20:47:43,268 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing 31e28af4302d239e8d29f4d067b98d43, disabling compactions & flushes 2024-11-11T20:47:43,268 INFO [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731358048736.31e28af4302d239e8d29f4d067b98d43. 2024-11-11T20:47:43,269 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731358048736.31e28af4302d239e8d29f4d067b98d43. 2024-11-11T20:47:43,269 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731358048736.31e28af4302d239e8d29f4d067b98d43. after waiting 0 ms 2024-11-11T20:47:43,269 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731358048736.31e28af4302d239e8d29f4d067b98d43. 
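The ConstantSizeRegionSplitPolicy(101) and StoreUtils(137) lines that repeat above boil down to two checks: the summed store size must exceed the configured check size, and the candidate mid key must differ from the region's first and last rows. A small illustrative sketch of those two checks in plain Java; the class, method names, and sample values are ours, not the real policy classes.

import java.nio.charset.StandardCharsets;
import java.util.Arrays;

public class SplitCheckSketch {

    // "Should split because region size is big enough sumSize=..., sizeToCheck=..."
    static boolean bigEnough(long sumSizeBytes, long sizeToCheckBytes) {
        return sumSizeBytes > sizeToCheckBytes;
    }

    // "cannot split ... because midkey is the same as first or last row"
    static boolean usableSplitPoint(byte[] midKey, byte[] firstRow, byte[] lastRow) {
        return midKey != null
            && !Arrays.equals(midKey, firstRow)
            && !Arrays.equals(midKey, lastRow);
    }

    public static void main(String[] args) {
        long sumSize = 66_662L;       // roughly the 65.1 K store size reported above (illustrative)
        long sizeToCheck = 16_384L;   // the 16.0 K check size reported above
        byte[] first = "row0001".getBytes(StandardCharsets.UTF_8);
        byte[] last  = "row0062".getBytes(StandardCharsets.UTF_8);
        byte[] mid   = "row0001".getBytes(StandardCharsets.UTF_8); // equal to the first row

        System.out.println("big enough:         " + bigEnough(sumSize, sizeToCheck));        // true
        System.out.println("usable split point: " + usableSplitPoint(mid, first, last));     // false
    }
}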
2024-11-11T20:47:43,269 INFO [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing 31e28af4302d239e8d29f4d067b98d43 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB 2024-11-11T20:47:43,279 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/.tmp/info/58fda7b7050e4ce2b8bc3371bf4332f5 is 1080, key is row0089/info:/1731358063047/Put/seqid=0 2024-11-11T20:47:43,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741848_1024 (size=13586) 2024-11-11T20:47:43,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741848_1024 (size=13586) 2024-11-11T20:47:43,284 INFO [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/.tmp/info/58fda7b7050e4ce2b8bc3371bf4332f5 2024-11-11T20:47:43,289 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/.tmp/info/58fda7b7050e4ce2b8bc3371bf4332f5 as hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/58fda7b7050e4ce2b8bc3371bf4332f5 2024-11-11T20:47:43,295 INFO [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/58fda7b7050e4ce2b8bc3371bf4332f5, entries=8, sequenceid=127, filesize=13.3 K 2024-11-11T20:47:43,297 INFO [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=0 B/0 for 31e28af4302d239e8d29f4d067b98d43 in 27ms, sequenceid=127, compaction requested=true 2024-11-11T20:47:43,298 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731358048736.31e28af4302d239e8d29f4d067b98d43.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/e8f259d5fc3d45cc84787332764d1e58, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/7a65a1ba311b4f128f3953ae4eb32803, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/88ebb6e9c633467a8cf83e6e78bb99b2, 
hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/1b90f77d77334d21ab546e53915af6a9, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/50e6b7e5198548148ca743fd66e28a90, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/35f83c534fa340c182b6e62cf35f33f6, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/d3629cc16f074023ae373191cb3fe681, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/213af03e37a74ec0a1b23b068b8f496e, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/883b6cb4dc7f4b12a2158fb4da3aabdd] to archive 2024-11-11T20:47:43,299 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731358048736.31e28af4302d239e8d29f4d067b98d43.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-11T20:47:43,301 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731358048736.31e28af4302d239e8d29f4d067b98d43.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/e8f259d5fc3d45cc84787332764d1e58 to hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/archive/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/e8f259d5fc3d45cc84787332764d1e58 2024-11-11T20:47:43,302 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731358048736.31e28af4302d239e8d29f4d067b98d43.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/7a65a1ba311b4f128f3953ae4eb32803 to hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/archive/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/7a65a1ba311b4f128f3953ae4eb32803 2024-11-11T20:47:43,304 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731358048736.31e28af4302d239e8d29f4d067b98d43.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/88ebb6e9c633467a8cf83e6e78bb99b2 to hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/archive/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/88ebb6e9c633467a8cf83e6e78bb99b2 2024-11-11T20:47:43,305 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731358048736.31e28af4302d239e8d29f4d067b98d43.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/1b90f77d77334d21ab546e53915af6a9 to 
hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/archive/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/1b90f77d77334d21ab546e53915af6a9 2024-11-11T20:47:43,306 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731358048736.31e28af4302d239e8d29f4d067b98d43.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/50e6b7e5198548148ca743fd66e28a90 to hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/archive/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/50e6b7e5198548148ca743fd66e28a90 2024-11-11T20:47:43,307 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731358048736.31e28af4302d239e8d29f4d067b98d43.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/35f83c534fa340c182b6e62cf35f33f6 to hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/archive/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/35f83c534fa340c182b6e62cf35f33f6 2024-11-11T20:47:43,308 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731358048736.31e28af4302d239e8d29f4d067b98d43.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/d3629cc16f074023ae373191cb3fe681 to hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/archive/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/d3629cc16f074023ae373191cb3fe681 2024-11-11T20:47:43,309 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731358048736.31e28af4302d239e8d29f4d067b98d43.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/213af03e37a74ec0a1b23b068b8f496e to hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/archive/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/213af03e37a74ec0a1b23b068b8f496e 2024-11-11T20:47:43,310 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731358048736.31e28af4302d239e8d29f4d067b98d43.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/883b6cb4dc7f4b12a2158fb4da3aabdd to hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/archive/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/883b6cb4dc7f4b12a2158fb4da3aabdd 2024-11-11T20:47:43,316 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=1 
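In the HFileArchiver(596) records above, each compacted store file moves from <root>/data/default/<table>/<region>/<family>/<hfile> to the same relative path under <root>/archive. A minimal sketch of that path mirroring with java.nio on a local directory; the helper name and the local-filesystem assumption are ours, not the HFileArchiver API.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

public class ArchiveSketch {

    // Mirror a store file's data-relative path under the archive root and move it there.
    static Path archiveCompactedFile(Path root, Path storeFile) throws IOException {
        Path relative = root.resolve("data").relativize(storeFile); // default/<table>/<region>/<family>/<hfile>
        Path target = root.resolve("archive").resolve("data").resolve(relative);
        Files.createDirectories(target.getParent());
        return Files.move(storeFile, target);
    }

    public static void main(String[] args) throws IOException {
        Path root = Files.createTempDirectory("hbase-root");
        Path storeFile = root.resolve(
            "data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/"
                + "e8f259d5fc3d45cc84787332764d1e58");
        Files.createDirectories(storeFile.getParent());
        Files.createFile(storeFile);
        System.out.println("archived to: " + archiveCompactedFile(root, storeFile));
    }
}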
2024-11-11T20:47:43,317 INFO [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731358048736.31e28af4302d239e8d29f4d067b98d43. 2024-11-11T20:47:43,317 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for 31e28af4302d239e8d29f4d067b98d43: Waiting for close lock at 1731358063268Running coprocessor pre-close hooks at 1731358063268Disabling compacts and flushes for region at 1731358063268Disabling writes for close at 1731358063269 (+1 ms)Obtaining lock to block concurrent updates at 1731358063269Preparing flush snapshotting stores in 31e28af4302d239e8d29f4d067b98d43 at 1731358063269Finished memstore snapshotting TestLogRolling-testLogRolling,,1731358048736.31e28af4302d239e8d29f4d067b98d43., syncing WAL and waiting on mvcc, flushsize=dataSize=8608, getHeapSize=9456, getOffHeapSize=0, getCellsCount=8 at 1731358063269Flushing stores of TestLogRolling-testLogRolling,,1731358048736.31e28af4302d239e8d29f4d067b98d43. at 1731358063271 (+2 ms)Flushing 31e28af4302d239e8d29f4d067b98d43/info: creating writer at 1731358063271Flushing 31e28af4302d239e8d29f4d067b98d43/info: appending metadata at 1731358063278 (+7 ms)Flushing 31e28af4302d239e8d29f4d067b98d43/info: closing flushed file at 1731358063278Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1204cb1c: reopening flushed file at 1731358063289 (+11 ms)Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=0 B/0 for 31e28af4302d239e8d29f4d067b98d43 in 27ms, sequenceid=127, compaction requested=true at 1731358063297 (+8 ms)Writing region close event to WAL at 1731358063312 (+15 ms)Running coprocessor post-close hooks at 1731358063317 (+5 ms)Closed at 1731358063317 2024-11-11T20:47:43,319 INFO [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed 31e28af4302d239e8d29f4d067b98d43 2024-11-11T20:47:43,320 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=31e28af4302d239e8d29f4d067b98d43, regionState=CLOSED 2024-11-11T20:47:43,321 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 31e28af4302d239e8d29f4d067b98d43, server=51ca66f7ee3c,45533,1731358047907 because future has completed 2024-11-11T20:47:43,324 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-11T20:47:43,325 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure 31e28af4302d239e8d29f4d067b98d43, server=51ca66f7ee3c,45533,1731358047907 in 216 msec 2024-11-11T20:47:43,327 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-11T20:47:43,327 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=31e28af4302d239e8d29f4d067b98d43, UNASSIGN in 224 msec 2024-11-11T20:47:43,334 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:47:43,338 INFO [PEWorker-5 
{}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 3 storefiles, region=31e28af4302d239e8d29f4d067b98d43, threads=3 2024-11-11T20:47:43,340 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/58fda7b7050e4ce2b8bc3371bf4332f5 for region: 31e28af4302d239e8d29f4d067b98d43 2024-11-11T20:47:43,340 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/60d2284b6f554f8eb691c71dd5237733 for region: 31e28af4302d239e8d29f4d067b98d43 2024-11-11T20:47:43,340 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/ab7f0a443e19411a9237bb375e5c07e3 for region: 31e28af4302d239e8d29f4d067b98d43 2024-11-11T20:47:43,349 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/58fda7b7050e4ce2b8bc3371bf4332f5, top=true 2024-11-11T20:47:43,349 DEBUG [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/ab7f0a443e19411a9237bb375e5c07e3, top=true 2024-11-11T20:47:43,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741849_1025 (size=27) 2024-11-11T20:47:43,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741849_1025 (size=27) 2024-11-11T20:47:43,359 INFO [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/TestLogRolling-testLogRolling=31e28af4302d239e8d29f4d067b98d43-ab7f0a443e19411a9237bb375e5c07e3 for child: e2da7cb8257b39902dcb50ef5e5e3cc7, parent: 31e28af4302d239e8d29f4d067b98d43 2024-11-11T20:47:43,359 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/ab7f0a443e19411a9237bb375e5c07e3 for region: 31e28af4302d239e8d29f4d067b98d43 2024-11-11T20:47:43,364 INFO [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/TestLogRolling-testLogRolling=31e28af4302d239e8d29f4d067b98d43-58fda7b7050e4ce2b8bc3371bf4332f5 for child: 
e2da7cb8257b39902dcb50ef5e5e3cc7, parent: 31e28af4302d239e8d29f4d067b98d43 2024-11-11T20:47:43,364 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/58fda7b7050e4ce2b8bc3371bf4332f5 for region: 31e28af4302d239e8d29f4d067b98d43 2024-11-11T20:47:43,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741850_1026 (size=27) 2024-11-11T20:47:43,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741850_1026 (size=27) 2024-11-11T20:47:43,369 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/60d2284b6f554f8eb691c71dd5237733 for region: 31e28af4302d239e8d29f4d067b98d43 2024-11-11T20:47:43,371 DEBUG [PEWorker-5 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region 31e28af4302d239e8d29f4d067b98d43 Daughter A: [hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/17b383949ba3671cbe9bcb0fc4722887/info/60d2284b6f554f8eb691c71dd5237733.31e28af4302d239e8d29f4d067b98d43] storefiles, Daughter B: [hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/TestLogRolling-testLogRolling=31e28af4302d239e8d29f4d067b98d43-58fda7b7050e4ce2b8bc3371bf4332f5, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/60d2284b6f554f8eb691c71dd5237733.31e28af4302d239e8d29f4d067b98d43, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/TestLogRolling-testLogRolling=31e28af4302d239e8d29f4d067b98d43-ab7f0a443e19411a9237bb375e5c07e3] storefiles. 
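The SplitTableRegionProcedure(802) record above shows the two file-name shapes the daughters receive: daughter A keeps a reference named <hfile>.<parentEncodedRegion>, while daughter B's files use the HFileLink-style name <table>=<parentEncodedRegion>-<hfile>. The sketch below only reconstructs those name formats as inferred from this log, not from HBase source.

public class DaughterFileNameSketch {

    // Reference kept by daughter A: "<hfile>.<parentEncodedRegion>"
    static String referenceName(String hfile, String parentRegion) {
        return hfile + "." + parentRegion;
    }

    // Link created for daughter B: "<table>=<parentEncodedRegion>-<hfile>"
    static String linkName(String table, String parentRegion, String hfile) {
        return table + "=" + parentRegion + "-" + hfile;
    }

    public static void main(String[] args) {
        String parent = "31e28af4302d239e8d29f4d067b98d43";
        // Matches the daughter A entry listed above.
        System.out.println(referenceName("60d2284b6f554f8eb691c71dd5237733", parent));
        // Matches one of the daughter B entries listed above.
        System.out.println(linkName("TestLogRolling-testLogRolling", parent,
            "58fda7b7050e4ce2b8bc3371bf4332f5"));
    }
}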
2024-11-11T20:47:43,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741851_1027 (size=71) 2024-11-11T20:47:43,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741851_1027 (size=71) 2024-11-11T20:47:43,382 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:47:43,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741852_1028 (size=71) 2024-11-11T20:47:43,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741852_1028 (size=71) 2024-11-11T20:47:43,395 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:47:43,407 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/17b383949ba3671cbe9bcb0fc4722887/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=-1 2024-11-11T20:47:43,409 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=-1 2024-11-11T20:47:43,412 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731358048736.31e28af4302d239e8d29f4d067b98d43.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731358063411"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1731358063411"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1731358063411"}]},"ts":"1731358063411"} 2024-11-11T20:47:43,412 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731358063089.17b383949ba3671cbe9bcb0fc4722887.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731358063411"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731358063411"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731358063411"}]},"ts":"1731358063411"} 2024-11-11T20:47:43,412 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731358063411"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731358063411"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731358063411"}]},"ts":"1731358063411"} 2024-11-11T20:47:43,428 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=17b383949ba3671cbe9bcb0fc4722887, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, 
region=e2da7cb8257b39902dcb50ef5e5e3cc7, ASSIGN}] 2024-11-11T20:47:43,430 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=17b383949ba3671cbe9bcb0fc4722887, ASSIGN 2024-11-11T20:47:43,430 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e2da7cb8257b39902dcb50ef5e5e3cc7, ASSIGN 2024-11-11T20:47:43,431 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e2da7cb8257b39902dcb50ef5e5e3cc7, ASSIGN; state=SPLITTING_NEW, location=51ca66f7ee3c,45533,1731358047907; forceNewPlan=false, retain=false 2024-11-11T20:47:43,431 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=17b383949ba3671cbe9bcb0fc4722887, ASSIGN; state=SPLITTING_NEW, location=51ca66f7ee3c,45533,1731358047907; forceNewPlan=false, retain=false 2024-11-11T20:47:43,582 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=e2da7cb8257b39902dcb50ef5e5e3cc7, regionState=OPENING, regionLocation=51ca66f7ee3c,45533,1731358047907 2024-11-11T20:47:43,582 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=17b383949ba3671cbe9bcb0fc4722887, regionState=OPENING, regionLocation=51ca66f7ee3c,45533,1731358047907 2024-11-11T20:47:43,585 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=17b383949ba3671cbe9bcb0fc4722887, ASSIGN because future has completed 2024-11-11T20:47:43,586 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 17b383949ba3671cbe9bcb0fc4722887, server=51ca66f7ee3c,45533,1731358047907}] 2024-11-11T20:47:43,587 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e2da7cb8257b39902dcb50ef5e5e3cc7, ASSIGN because future has completed 2024-11-11T20:47:43,588 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure e2da7cb8257b39902dcb50ef5e5e3cc7, server=51ca66f7ee3c,45533,1731358047907}] 2024-11-11T20:47:43,743 INFO [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731358063089.17b383949ba3671cbe9bcb0fc4722887. 
2024-11-11T20:47:43,743 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 17b383949ba3671cbe9bcb0fc4722887, NAME => 'TestLogRolling-testLogRolling,,1731358063089.17b383949ba3671cbe9bcb0fc4722887.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-11T20:47:43,743 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 17b383949ba3671cbe9bcb0fc4722887 2024-11-11T20:47:43,743 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731358063089.17b383949ba3671cbe9bcb0fc4722887.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T20:47:43,744 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 17b383949ba3671cbe9bcb0fc4722887 2024-11-11T20:47:43,744 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 17b383949ba3671cbe9bcb0fc4722887 2024-11-11T20:47:43,745 INFO [StoreOpener-17b383949ba3671cbe9bcb0fc4722887-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 17b383949ba3671cbe9bcb0fc4722887 2024-11-11T20:47:43,746 INFO [StoreOpener-17b383949ba3671cbe9bcb0fc4722887-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 17b383949ba3671cbe9bcb0fc4722887 columnFamilyName info 2024-11-11T20:47:43,747 DEBUG [StoreOpener-17b383949ba3671cbe9bcb0fc4722887-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:47:43,761 DEBUG [StoreOpener-17b383949ba3671cbe9bcb0fc4722887-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/17b383949ba3671cbe9bcb0fc4722887/info/60d2284b6f554f8eb691c71dd5237733.31e28af4302d239e8d29f4d067b98d43->hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/60d2284b6f554f8eb691c71dd5237733-bottom 2024-11-11T20:47:43,762 INFO [StoreOpener-17b383949ba3671cbe9bcb0fc4722887-1 {}] regionserver.HStore(327): Store=17b383949ba3671cbe9bcb0fc4722887/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T20:47:43,762 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 17b383949ba3671cbe9bcb0fc4722887 2024-11-11T20:47:43,763 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/17b383949ba3671cbe9bcb0fc4722887 2024-11-11T20:47:43,764 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/17b383949ba3671cbe9bcb0fc4722887 2024-11-11T20:47:43,764 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 17b383949ba3671cbe9bcb0fc4722887 2024-11-11T20:47:43,764 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 17b383949ba3671cbe9bcb0fc4722887 2024-11-11T20:47:43,766 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 17b383949ba3671cbe9bcb0fc4722887 2024-11-11T20:47:43,766 INFO [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 17b383949ba3671cbe9bcb0fc4722887; next sequenceid=131; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=772762, jitterRate=-0.017383337020874023}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-11T20:47:43,767 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 17b383949ba3671cbe9bcb0fc4722887 2024-11-11T20:47:43,767 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 17b383949ba3671cbe9bcb0fc4722887: Running coprocessor pre-open hook at 1731358063744Writing region info on filesystem at 1731358063744Initializing all the Stores at 1731358063745 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731358063745Cleaning up temporary data from old regions at 1731358063764 (+19 ms)Running coprocessor post-open hooks at 1731358063767 (+3 ms)Region opened successfully at 1731358063767 2024-11-11T20:47:43,768 INFO [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731358063089.17b383949ba3671cbe9bcb0fc4722887., pid=12, masterSystemTime=1731358063739 2024-11-11T20:47:43,768 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store 
17b383949ba3671cbe9bcb0fc4722887:info, priority=-2147483648, current under compaction store size is 1 2024-11-11T20:47:43,768 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T20:47:43,768 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-11T20:47:43,769 INFO [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1731358063089.17b383949ba3671cbe9bcb0fc4722887. 2024-11-11T20:47:43,769 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HStore(1541): 17b383949ba3671cbe9bcb0fc4722887/info is initiating minor compaction (all files) 2024-11-11T20:47:43,769 INFO [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 17b383949ba3671cbe9bcb0fc4722887/info in TestLogRolling-testLogRolling,,1731358063089.17b383949ba3671cbe9bcb0fc4722887. 2024-11-11T20:47:43,769 INFO [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/17b383949ba3671cbe9bcb0fc4722887/info/60d2284b6f554f8eb691c71dd5237733.31e28af4302d239e8d29f4d067b98d43->hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/60d2284b6f554f8eb691c71dd5237733-bottom] into tmpdir=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/17b383949ba3671cbe9bcb0fc4722887/.tmp, totalSize=85.3 K 2024-11-11T20:47:43,770 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] compactions.Compactor(225): Compacting 60d2284b6f554f8eb691c71dd5237733.31e28af4302d239e8d29f4d067b98d43, keycount=38, bloomtype=ROW, size=85.3 K, encoding=NONE, compression=NONE, seqNum=100, earliestPutTs=1731358058840 2024-11-11T20:47:43,770 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731358063089.17b383949ba3671cbe9bcb0fc4722887. 2024-11-11T20:47:43,770 INFO [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731358063089.17b383949ba3671cbe9bcb0fc4722887. 2024-11-11T20:47:43,770 INFO [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7. 
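Not part of the log output: the store file loaded for this daughter is a half-file reference (the name 60d2284b6f554f8eb691c71dd5237733.31e28af4302d239e8d29f4d067b98d43 is <parent hfile>.<parent encoded region>, resolved above to the -bottom half of the parent's file), and the compaction just requested will rewrite it into a real store file. A sketch, using the plain Hadoop FileSystem API with the NameNode address and data root shown in the log, of listing that store directory to inspect the reference before it is rewritten:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListDaughterStoreFiles {
      public static void main(String[] args) throws Exception {
        // NameNode address and paths are copied from the log; adjust for another cluster.
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:41561"), new Configuration());
        Path store = new Path("/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/"
            + "data/default/TestLogRolling-testLogRolling/17b383949ba3671cbe9bcb0fc4722887/info");
        for (FileStatus f : fs.listStatus(store)) {
          // A reference file is tiny on disk even though the store reports the parent's size (85.3 K here).
          System.out.println(f.getPath().getName() + "\t" + f.getLen() + " bytes");
        }
      }
    }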
2024-11-11T20:47:43,770 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => e2da7cb8257b39902dcb50ef5e5e3cc7, NAME => 'TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-11T20:47:43,770 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling e2da7cb8257b39902dcb50ef5e5e3cc7 2024-11-11T20:47:43,771 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T20:47:43,771 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for e2da7cb8257b39902dcb50ef5e5e3cc7 2024-11-11T20:47:43,771 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for e2da7cb8257b39902dcb50ef5e5e3cc7 2024-11-11T20:47:43,771 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=17b383949ba3671cbe9bcb0fc4722887, regionState=OPEN, openSeqNum=131, regionLocation=51ca66f7ee3c,45533,1731358047907 2024-11-11T20:47:43,772 INFO [StoreOpener-e2da7cb8257b39902dcb50ef5e5e3cc7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region e2da7cb8257b39902dcb50ef5e5e3cc7 2024-11-11T20:47:43,773 INFO [StoreOpener-e2da7cb8257b39902dcb50ef5e5e3cc7-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e2da7cb8257b39902dcb50ef5e5e3cc7 columnFamilyName info 2024-11-11T20:47:43,773 DEBUG [StoreOpener-e2da7cb8257b39902dcb50ef5e5e3cc7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:47:43,773 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45533 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-11T20:47:43,773 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
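Not part of the log output: the flush requested on region 1588230740 (hbase:meta) above is triggered internally by MemStoreFlusher, and FlushAllLargeStoresPolicy reports "flushing all" because no single column family exceeded the per-family lower bound. A minimal sketch of issuing the equivalent explicit request through the public Admin API, assuming a connection configured against this mini cluster:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushMeta {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.flush(TableName.META_TABLE_NAME); // flushes every column family of hbase:meta
        }
      }
    }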
2024-11-11T20:47:43,773 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.11 KB heapSize=8.96 KB 2024-11-11T20:47:43,774 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 17b383949ba3671cbe9bcb0fc4722887, server=51ca66f7ee3c,45533,1731358047907 because future has completed 2024-11-11T20:47:43,783 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=10 2024-11-11T20:47:43,783 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 17b383949ba3671cbe9bcb0fc4722887, server=51ca66f7ee3c,45533,1731358047907 in 194 msec 2024-11-11T20:47:43,785 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=17b383949ba3671cbe9bcb0fc4722887, ASSIGN in 355 msec 2024-11-11T20:47:43,785 DEBUG [StoreOpener-e2da7cb8257b39902dcb50ef5e5e3cc7-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/60d2284b6f554f8eb691c71dd5237733.31e28af4302d239e8d29f4d067b98d43->hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/60d2284b6f554f8eb691c71dd5237733-top 2024-11-11T20:47:43,791 DEBUG [StoreOpener-e2da7cb8257b39902dcb50ef5e5e3cc7-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/TestLogRolling-testLogRolling=31e28af4302d239e8d29f4d067b98d43-58fda7b7050e4ce2b8bc3371bf4332f5 2024-11-11T20:47:43,792 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/hbase/meta/1588230740/.tmp/info/874426d8dcad45c8b9e158f31b9de6d9 is 193, key is TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7./info:regioninfo/1731358063582/Put/seqid=0 2024-11-11T20:47:43,792 INFO [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 17b383949ba3671cbe9bcb0fc4722887#info#compaction#66 average throughput is 20.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T20:47:43,792 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/17b383949ba3671cbe9bcb0fc4722887/.tmp/info/3d91cd3ad43547fa8b036fc7aaf74bc9 is 1080, key is row0001/info:/1731358058840/Put/seqid=0 2024-11-11T20:47:43,797 DEBUG [StoreOpener-e2da7cb8257b39902dcb50ef5e5e3cc7-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/TestLogRolling-testLogRolling=31e28af4302d239e8d29f4d067b98d43-ab7f0a443e19411a9237bb375e5c07e3 2024-11-11T20:47:43,797 INFO [StoreOpener-e2da7cb8257b39902dcb50ef5e5e3cc7-1 {}] regionserver.HStore(327): Store=e2da7cb8257b39902dcb50ef5e5e3cc7/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T20:47:43,797 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for e2da7cb8257b39902dcb50ef5e5e3cc7 2024-11-11T20:47:43,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741853_1029 (size=70862) 2024-11-11T20:47:43,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741853_1029 (size=70862) 2024-11-11T20:47:43,798 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7 2024-11-11T20:47:43,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741854_1030 (size=9847) 2024-11-11T20:47:43,799 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.92 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/hbase/meta/1588230740/.tmp/info/874426d8dcad45c8b9e158f31b9de6d9 2024-11-11T20:47:43,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741854_1030 (size=9847) 2024-11-11T20:47:43,799 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7 2024-11-11T20:47:43,800 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for e2da7cb8257b39902dcb50ef5e5e3cc7 2024-11-11T20:47:43,800 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for e2da7cb8257b39902dcb50ef5e5e3cc7 2024-11-11T20:47:43,801 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] 
regionserver.HRegion(1093): writing seq id for e2da7cb8257b39902dcb50ef5e5e3cc7 2024-11-11T20:47:43,802 INFO [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened e2da7cb8257b39902dcb50ef5e5e3cc7; next sequenceid=131; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=792600, jitterRate=0.007843077182769775}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-11T20:47:43,802 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e2da7cb8257b39902dcb50ef5e5e3cc7 2024-11-11T20:47:43,802 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for e2da7cb8257b39902dcb50ef5e5e3cc7: Running coprocessor pre-open hook at 1731358063771Writing region info on filesystem at 1731358063771Initializing all the Stores at 1731358063771Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731358063772 (+1 ms)Cleaning up temporary data from old regions at 1731358063800 (+28 ms)Running coprocessor post-open hooks at 1731358063802 (+2 ms)Region opened successfully at 1731358063802 2024-11-11T20:47:43,803 INFO [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7., pid=13, masterSystemTime=1731358063739 2024-11-11T20:47:43,804 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store e2da7cb8257b39902dcb50ef5e5e3cc7:info, priority=-2147483648, current under compaction store size is 2 2024-11-11T20:47:43,804 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T20:47:43,804 DEBUG [RS:0;51ca66f7ee3c:45533-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T20:47:43,805 INFO [RS:0;51ca66f7ee3c:45533-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7. 2024-11-11T20:47:43,805 DEBUG [RS:0;51ca66f7ee3c:45533-longCompactions-0 {}] regionserver.HStore(1541): e2da7cb8257b39902dcb50ef5e5e3cc7/info is initiating minor compaction (all files) 2024-11-11T20:47:43,805 INFO [RS:0;51ca66f7ee3c:45533-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e2da7cb8257b39902dcb50ef5e5e3cc7/info in TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7. 
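Aside, derived from the figures in the log and assuming the jitter is applied multiplicatively to a common base: the two daughters report desiredMaxFileSize=772762 with jitterRate=-0.017383 and desiredMaxFileSize=792600 with jitterRate=0.007843, and both are consistent with a base desiredMaxFileSize of 786432 bytes (768 KiB), since 786432 × (1 − 0.017383) ≈ 772762 and 786432 × (1 + 0.007843) ≈ 792600.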
2024-11-11T20:47:43,805 INFO [RS:0;51ca66f7ee3c:45533-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/60d2284b6f554f8eb691c71dd5237733.31e28af4302d239e8d29f4d067b98d43->hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/60d2284b6f554f8eb691c71dd5237733-top, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/TestLogRolling-testLogRolling=31e28af4302d239e8d29f4d067b98d43-ab7f0a443e19411a9237bb375e5c07e3, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/TestLogRolling-testLogRolling=31e28af4302d239e8d29f4d067b98d43-58fda7b7050e4ce2b8bc3371bf4332f5] into tmpdir=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp, totalSize=116.0 K 2024-11-11T20:47:43,806 DEBUG [RS:0;51ca66f7ee3c:45533-longCompactions-0 {}] compactions.Compactor(225): Compacting 60d2284b6f554f8eb691c71dd5237733.31e28af4302d239e8d29f4d067b98d43, keycount=38, bloomtype=ROW, size=85.3 K, encoding=NONE, compression=NONE, seqNum=101, earliestPutTs=1731358058840 2024-11-11T20:47:43,806 DEBUG [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7. 2024-11-11T20:47:43,806 DEBUG [RS:0;51ca66f7ee3c:45533-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=31e28af4302d239e8d29f4d067b98d43-ab7f0a443e19411a9237bb375e5c07e3, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1731358063022 2024-11-11T20:47:43,807 INFO [RS_OPEN_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7. 
2024-11-11T20:47:43,807 DEBUG [RS:0;51ca66f7ee3c:45533-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=31e28af4302d239e8d29f4d067b98d43-58fda7b7050e4ce2b8bc3371bf4332f5, keycount=8, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1731358063047 2024-11-11T20:47:43,807 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=e2da7cb8257b39902dcb50ef5e5e3cc7, regionState=OPEN, openSeqNum=131, regionLocation=51ca66f7ee3c,45533,1731358047907 2024-11-11T20:47:43,810 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure e2da7cb8257b39902dcb50ef5e5e3cc7, server=51ca66f7ee3c,45533,1731358047907 because future has completed 2024-11-11T20:47:43,812 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36227 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=51ca66f7ee3c,45533,1731358047907, table=TestLogRolling-testLogRolling, region=e2da7cb8257b39902dcb50ef5e5e3cc7. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-11-11T20:47:43,816 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=11 2024-11-11T20:47:43,817 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure e2da7cb8257b39902dcb50ef5e5e3cc7, server=51ca66f7ee3c,45533,1731358047907 in 224 msec 2024-11-11T20:47:43,820 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-11-11T20:47:43,821 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e2da7cb8257b39902dcb50ef5e5e3cc7, ASSIGN in 389 msec 2024-11-11T20:47:43,823 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/hbase/meta/1588230740/.tmp/ns/d5f66e08908f4aabb9163c9e5d3f0536 is 43, key is default/ns:d/1731358048697/Put/seqid=0 2024-11-11T20:47:43,824 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=31e28af4302d239e8d29f4d067b98d43, daughterA=17b383949ba3671cbe9bcb0fc4722887, daughterB=e2da7cb8257b39902dcb50ef5e5e3cc7 in 731 msec 2024-11-11T20:47:43,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741855_1031 (size=5153) 2024-11-11T20:47:43,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741855_1031 (size=5153) 2024-11-11T20:47:43,835 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/hbase/meta/1588230740/.tmp/ns/d5f66e08908f4aabb9163c9e5d3f0536 2024-11-11T20:47:43,836 INFO [RS:0;51ca66f7ee3c:45533-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2da7cb8257b39902dcb50ef5e5e3cc7#info#compaction#69 average 
throughput is 17.96 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T20:47:43,837 DEBUG [RS:0;51ca66f7ee3c:45533-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/e6d6e8e865904ff09ce6ecddd224b8d0 is 1080, key is row0062/info:/1731358060983/Put/seqid=0 2024-11-11T20:47:43,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741856_1032 (size=42984) 2024-11-11T20:47:43,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741856_1032 (size=42984) 2024-11-11T20:47:43,851 DEBUG [RS:0;51ca66f7ee3c:45533-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/e6d6e8e865904ff09ce6ecddd224b8d0 as hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/e6d6e8e865904ff09ce6ecddd224b8d0 2024-11-11T20:47:43,859 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/hbase/meta/1588230740/.tmp/table/427c1c6cbd2644b4bb112c8370daef0a is 65, key is TestLogRolling-testLogRolling/table:state/1731358049113/Put/seqid=0 2024-11-11T20:47:43,859 INFO [RS:0;51ca66f7ee3c:45533-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e2da7cb8257b39902dcb50ef5e5e3cc7/info of e2da7cb8257b39902dcb50ef5e5e3cc7 into e6d6e8e865904ff09ce6ecddd224b8d0(size=42.0 K), total size for store is 42.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
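Aside, derived from the figures reported above: the three inputs selected for this compaction were reported at 85.3 K, 17.5 K and 13.3 K, which sums to 116.1 K and matches totalSize=116.0 K within rounding, yet the compacted output e6d6e8e865904ff09ce6ecddd224b8d0 is only 42.0 K. The 85.3 K size and keycount=38 for the first input are the parent file's own metadata; a -top half reference exposes only the keys at or above the split point row0062, so most of that file's data is rewritten by the other daughter's compaction (the 69.2 K file committed for 17b383949ba3671cbe9bcb0fc4722887 a moment later) rather than here.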
2024-11-11T20:47:43,859 DEBUG [RS:0;51ca66f7ee3c:45533-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e2da7cb8257b39902dcb50ef5e5e3cc7: 2024-11-11T20:47:43,859 INFO [RS:0;51ca66f7ee3c:45533-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7., storeName=e2da7cb8257b39902dcb50ef5e5e3cc7/info, priority=13, startTime=1731358063804; duration=0sec 2024-11-11T20:47:43,859 DEBUG [RS:0;51ca66f7ee3c:45533-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T20:47:43,859 DEBUG [RS:0;51ca66f7ee3c:45533-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2da7cb8257b39902dcb50ef5e5e3cc7:info 2024-11-11T20:47:43,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741857_1033 (size=5340) 2024-11-11T20:47:43,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741857_1033 (size=5340) 2024-11-11T20:47:43,869 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/hbase/meta/1588230740/.tmp/table/427c1c6cbd2644b4bb112c8370daef0a 2024-11-11T20:47:43,877 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/hbase/meta/1588230740/.tmp/info/874426d8dcad45c8b9e158f31b9de6d9 as hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/hbase/meta/1588230740/info/874426d8dcad45c8b9e158f31b9de6d9 2024-11-11T20:47:43,882 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/hbase/meta/1588230740/info/874426d8dcad45c8b9e158f31b9de6d9, entries=30, sequenceid=17, filesize=9.6 K 2024-11-11T20:47:43,883 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/hbase/meta/1588230740/.tmp/ns/d5f66e08908f4aabb9163c9e5d3f0536 as hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/hbase/meta/1588230740/ns/d5f66e08908f4aabb9163c9e5d3f0536 2024-11-11T20:47:43,888 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/hbase/meta/1588230740/ns/d5f66e08908f4aabb9163c9e5d3f0536, entries=2, sequenceid=17, filesize=5.0 K 2024-11-11T20:47:43,889 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/hbase/meta/1588230740/.tmp/table/427c1c6cbd2644b4bb112c8370daef0a as hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/hbase/meta/1588230740/table/427c1c6cbd2644b4bb112c8370daef0a 2024-11-11T20:47:43,894 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/hbase/meta/1588230740/table/427c1c6cbd2644b4bb112c8370daef0a, entries=2, sequenceid=17, filesize=5.2 K 2024-11-11T20:47:43,895 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.11 KB/5234, heapSize ~8.66 KB/8872, currentSize=705 B/705 for 1588230740 in 122ms, sequenceid=17, compaction requested=false 2024-11-11T20:47:43,895 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-11T20:47:43,937 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:47:43,937 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:47:44,206 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/17b383949ba3671cbe9bcb0fc4722887/.tmp/info/3d91cd3ad43547fa8b036fc7aaf74bc9 as hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/17b383949ba3671cbe9bcb0fc4722887/info/3d91cd3ad43547fa8b036fc7aaf74bc9 2024-11-11T20:47:44,213 INFO [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 17b383949ba3671cbe9bcb0fc4722887/info of 17b383949ba3671cbe9bcb0fc4722887 into 3d91cd3ad43547fa8b036fc7aaf74bc9(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
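Not part of the log output: with this commit both daughters have finished the compactions that were queued when they opened, replacing the half-file references with real store files. A sketch of driving the same rewrite explicitly through the Admin API (table name from the log; the poll interval is an illustrative assumption), which can be useful when a test needs the references gone before asserting on store contents:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CompactAndWait {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestLogRolling-testLogRolling");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.majorCompact(table); // asynchronous request; regions compact in the background
          while (admin.getCompactionState(table) != CompactionState.NONE) {
            Thread.sleep(500); // poll until no region of the table reports a compaction in progress
          }
        }
      }
    }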
2024-11-11T20:47:44,213 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 17b383949ba3671cbe9bcb0fc4722887: 2024-11-11T20:47:44,213 INFO [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731358063089.17b383949ba3671cbe9bcb0fc4722887., storeName=17b383949ba3671cbe9bcb0fc4722887/info, priority=15, startTime=1731358063768; duration=0sec 2024-11-11T20:47:44,213 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T20:47:44,213 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 17b383949ba3671cbe9bcb0fc4722887:info 2024-11-11T20:47:44,938 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T20:47:44,938 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:47:45,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45533 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:34524 deadline: 1731358075064, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731358048736.31e28af4302d239e8d29f4d067b98d43. is not online on 51ca66f7ee3c,45533,1731358047907 2024-11-11T20:47:45,090 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731358048736.31e28af4302d239e8d29f4d067b98d43., hostname=51ca66f7ee3c,45533,1731358047907, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731358048736.31e28af4302d239e8d29f4d067b98d43., hostname=51ca66f7ee3c,45533,1731358047907, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731358048736.31e28af4302d239e8d29f4d067b98d43. 
is not online on 51ca66f7ee3c,45533,1731358047907 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-11T20:47:45,091 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731358048736.31e28af4302d239e8d29f4d067b98d43., hostname=51ca66f7ee3c,45533,1731358047907, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731358048736.31e28af4302d239e8d29f4d067b98d43. is not online on 51ca66f7ee3c,45533,1731358047907 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-11T20:47:45,091 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1731358048736.31e28af4302d239e8d29f4d067b98d43., hostname=51ca66f7ee3c,45533,1731358047907, seqNum=2 from cache 2024-11-11T20:47:45,939 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:47:45,939 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-11T20:47:46,939 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-11T20:47:46,939 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta
2024-11-11T20:47:47,940 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta
2024-11-11T20:47:47,940 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051
2024-11-11T20:47:48,317 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T20:47:48,853 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-11-11T20:47:48,941 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta
2024-11-11T20:47:48,941 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051
2024-11-11T20:47:49,942 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta
2024-11-11T20:47:49,942 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051
2024-11-11T20:47:50,943 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta
2024-11-11T20:47:50,943 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051
2024-11-11T20:47:51,943 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051
2024-11-11T20:47:51,943 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta
2024-11-11T20:47:52,945 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051
2024-11-11T20:47:52,945 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta
2024-11-11T20:47:53,946 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051
2024-11-11T20:47:53,946 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta
2024-11-11T20:47:54,947 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta
2024-11-11T20:47:54,947 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051
2024-11-11T20:47:55,187 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0097', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7., hostname=51ca66f7ee3c,45533,1731358047907, seqNum=131]
2024-11-11T20:47:55,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45533 {}] regionserver.HRegion(8855): Flush requested on e2da7cb8257b39902dcb50ef5e5e3cc7
2024-11-11T20:47:55,206 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e2da7cb8257b39902dcb50ef5e5e3cc7 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-11T20:47:55,212 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/b2dcaaa7783845b4af9a9ff5d84a4deb is 1080, key is row0097/info:/1731358075189/Put/seqid=0
2024-11-11T20:47:55,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741858_1034 (size=12516)
2024-11-11T20:47:55,227 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=141 (bloomFilter=true), to=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/b2dcaaa7783845b4af9a9ff5d84a4deb
2024-11-11T20:47:55,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741858_1034 (size=12516)
2024-11-11T20:47:55,237 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/b2dcaaa7783845b4af9a9ff5d84a4deb as hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/b2dcaaa7783845b4af9a9ff5d84a4deb
2024-11-11T20:47:55,246 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/b2dcaaa7783845b4af9a9ff5d84a4deb, entries=7, sequenceid=141, filesize=12.2 K
2024-11-11T20:47:55,250 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=14.71 KB/15064 for e2da7cb8257b39902dcb50ef5e5e3cc7 in 44ms, sequenceid=141, compaction requested=false
2024-11-11T20:47:55,250 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e2da7cb8257b39902dcb50ef5e5e3cc7:
2024-11-11T20:47:55,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45533 {}] regionserver.HRegion(8855): Flush requested on e2da7cb8257b39902dcb50ef5e5e3cc7
2024-11-11T20:47:55,253 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e2da7cb8257b39902dcb50ef5e5e3cc7 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB
2024-11-11T20:47:55,260 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/82dc47634071481cb875aad797372453 is 1080, key is row0104/info:/1731358075208/Put/seqid=0
2024-11-11T20:47:55,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741859_1035 (size=22238)
2024-11-11T20:47:55,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741859_1035 (size=22238)
2024-11-11T20:47:55,276 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=160 (bloomFilter=true), to=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/82dc47634071481cb875aad797372453
2024-11-11T20:47:55,286 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/82dc47634071481cb875aad797372453 as hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/82dc47634071481cb875aad797372453
2024-11-11T20:47:55,293 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/82dc47634071481cb875aad797372453, entries=16, sequenceid=160, filesize=21.7 K
2024-11-11T20:47:55,295 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=9.46 KB/9684 for e2da7cb8257b39902dcb50ef5e5e3cc7 in 41ms, sequenceid=160, compaction requested=true
2024-11-11T20:47:55,295 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e2da7cb8257b39902dcb50ef5e5e3cc7:
2024-11-11T20:47:55,295 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2da7cb8257b39902dcb50ef5e5e3cc7:info, priority=-2147483648, current under compaction store size is 1
2024-11-11T20:47:55,295 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-11T20:47:55,295 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-11T20:47:55,296 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 77738 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-11T20:47:55,296 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HStore(1541): e2da7cb8257b39902dcb50ef5e5e3cc7/info is initiating minor compaction (all files)
2024-11-11T20:47:55,296 INFO [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e2da7cb8257b39902dcb50ef5e5e3cc7/info in TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7.
2024-11-11T20:47:55,296 INFO [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/e6d6e8e865904ff09ce6ecddd224b8d0, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/b2dcaaa7783845b4af9a9ff5d84a4deb, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/82dc47634071481cb875aad797372453] into tmpdir=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp, totalSize=75.9 K
2024-11-11T20:47:55,297 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] compactions.Compactor(225): Compacting e6d6e8e865904ff09ce6ecddd224b8d0, keycount=35, bloomtype=ROW, size=42.0 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1731358060983
2024-11-11T20:47:55,297 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] compactions.Compactor(225): Compacting b2dcaaa7783845b4af9a9ff5d84a4deb, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=141, earliestPutTs=1731358075189
2024-11-11T20:47:55,297 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] compactions.Compactor(225): Compacting 82dc47634071481cb875aad797372453, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1731358075208
2024-11-11T20:47:55,316 INFO [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2da7cb8257b39902dcb50ef5e5e3cc7#info#compaction#73 average throughput is 29.76 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-11T20:47:55,316 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/d4f60e91475341f1aecee2a45472baeb is 1080, key is row0062/info:/1731358060983/Put/seqid=0
2024-11-11T20:47:55,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741860_1036 (size=67948)
2024-11-11T20:47:55,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741860_1036 (size=67948)
2024-11-11T20:47:55,341 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/d4f60e91475341f1aecee2a45472baeb as hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/d4f60e91475341f1aecee2a45472baeb
2024-11-11T20:47:55,351 INFO [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e2da7cb8257b39902dcb50ef5e5e3cc7/info of e2da7cb8257b39902dcb50ef5e5e3cc7 into d4f60e91475341f1aecee2a45472baeb(size=66.4 K), total size for store is 66.4 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-11T20:47:55,351 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e2da7cb8257b39902dcb50ef5e5e3cc7:
2024-11-11T20:47:55,351 INFO [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7., storeName=e2da7cb8257b39902dcb50ef5e5e3cc7/info, priority=13, startTime=1731358075295; duration=0sec
2024-11-11T20:47:55,352 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-11T20:47:55,352 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2da7cb8257b39902dcb50ef5e5e3cc7:info
2024-11-11T20:47:55,947 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:47:55,947 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:47:56,948 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:47:56,948 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
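The warnings above come from RecoverLeaseFSUtils probing DistributedFileSystem.isFileClosed reflectively (the trace goes through Method.invoke into RecoverLeaseFSUtils.isFileClosed). Because the DFSClient behind that filesystem instance has already been closed, the call throws IOException("Filesystem closed"), and reflection wraps it in a java.lang.reflect.InvocationTargetException whose own message is null, which is why the top line of the trace looks uninformative until the Caused by line. A minimal, self-contained Java sketch of that wrapping behaviour (the FakeFileSystem class and its method are invented for illustration and are not HBase or HDFS code):

```java
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class InvocationTargetDemo {

    // Hypothetical stand-in for a filesystem whose client is already closed.
    static class FakeFileSystem {
        public boolean isFileClosed(String path) throws IOException {
            throw new IOException("Filesystem closed");
        }
    }

    public static void main(String[] args) throws Exception {
        FakeFileSystem fs = new FakeFileSystem();
        // Look the method up reflectively, roughly the way RecoverLeaseFSUtils
        // probes for isFileClosed so it can work against older HDFS clients.
        Method isFileClosed = FakeFileSystem.class.getMethod("isFileClosed", String.class);
        try {
            isFileClosed.invoke(fs, "/WALs/example-wal");
        } catch (InvocationTargetException e) {
            // Method.invoke always wraps the target's exception; the wrapper has
            // no message of its own ("InvocationTargetException: null" in the log),
            // and the real failure is only visible through getCause().
            System.out.println("wrapper: " + e);
            System.out.println("cause:   " + e.getCause());
        }
    }
}
```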
2024-11-11T20:47:57,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45533 {}] regionserver.HRegion(8855): Flush requested on e2da7cb8257b39902dcb50ef5e5e3cc7
2024-11-11T20:47:57,282 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e2da7cb8257b39902dcb50ef5e5e3cc7 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB
2024-11-11T20:47:57,286 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/1c31ce7a10844536bf454131691aec28 is 1080, key is row0120/info:/1731358075255/Put/seqid=0
2024-11-11T20:47:57,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741861_1037 (size=15750)
2024-11-11T20:47:57,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741861_1037 (size=15750)
2024-11-11T20:47:57,292 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/1c31ce7a10844536bf454131691aec28
2024-11-11T20:47:57,298 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/1c31ce7a10844536bf454131691aec28 as hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/1c31ce7a10844536bf454131691aec28
2024-11-11T20:47:57,304 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/1c31ce7a10844536bf454131691aec28, entries=10, sequenceid=174, filesize=15.4 K
2024-11-11T20:47:57,305 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=10.51 KB/10760 for e2da7cb8257b39902dcb50ef5e5e3cc7 in 23ms, sequenceid=174, compaction requested=false
2024-11-11T20:47:57,305 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e2da7cb8257b39902dcb50ef5e5e3cc7:
2024-11-11T20:47:57,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45533 {}] regionserver.HRegion(8855): Flush requested on e2da7cb8257b39902dcb50ef5e5e3cc7
2024-11-11T20:47:57,307 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e2da7cb8257b39902dcb50ef5e5e3cc7 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB
2024-11-11T20:47:57,313 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/6c4d09e5b32648a5a01b2a7684334ac0 is 1080, key is row0130/info:/1731358077283/Put/seqid=0
2024-11-11T20:47:57,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741862_1038 (size=17906)
2024-11-11T20:47:57,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741862_1038 (size=17906)
2024-11-11T20:47:57,339 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=189 (bloomFilter=true), to=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/6c4d09e5b32648a5a01b2a7684334ac0
2024-11-11T20:47:57,346 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/6c4d09e5b32648a5a01b2a7684334ac0 as hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/6c4d09e5b32648a5a01b2a7684334ac0
2024-11-11T20:47:57,347 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45533 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=e2da7cb8257b39902dcb50ef5e5e3cc7, server=51ca66f7ee3c,45533,1731358047907
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
2024-11-11T20:47:57,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45533 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:34524 deadline: 1731358087346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=e2da7cb8257b39902dcb50ef5e5e3cc7, server=51ca66f7ee3c,45533,1731358047907
2024-11-11T20:47:57,348 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7., hostname=51ca66f7ee3c,45533,1731358047907, seqNum=131 , the old value is region=TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7., hostname=51ca66f7ee3c,45533,1731358047907, seqNum=131, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=e2da7cb8257b39902dcb50ef5e5e3cc7, server=51ca66f7ee3c,45533,1731358047907
    [embedded stack trace identical to the RegionTooBusyException above]
2024-11-11T20:47:57,348 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7., hostname=51ca66f7ee3c,45533,1731358047907, seqNum=131 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=e2da7cb8257b39902dcb50ef5e5e3cc7, server=51ca66f7ee3c,45533,1731358047907
    [embedded stack trace identical to the RegionTooBusyException above]
2024-11-11T20:47:57,348 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7., hostname=51ca66f7ee3c,45533,1731358047907, seqNum=131 because the exception is null or not the one we care about
2024-11-11T20:47:57,352 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/6c4d09e5b32648a5a01b2a7684334ac0, entries=12, sequenceid=189, filesize=17.5 K
2024-11-11T20:47:57,353 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=17.86 KB/18292 for e2da7cb8257b39902dcb50ef5e5e3cc7 in 46ms, sequenceid=189, compaction requested=true
2024-11-11T20:47:57,353 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e2da7cb8257b39902dcb50ef5e5e3cc7:
2024-11-11T20:47:57,354 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2da7cb8257b39902dcb50ef5e5e3cc7:info, priority=-2147483648, current under compaction store size is 1
2024-11-11T20:47:57,354 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-11T20:47:57,354 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-11T20:47:57,355 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101604 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-11T20:47:57,355 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HStore(1541): e2da7cb8257b39902dcb50ef5e5e3cc7/info is initiating minor compaction (all files)
2024-11-11T20:47:57,355 INFO [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e2da7cb8257b39902dcb50ef5e5e3cc7/info in TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7.
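Two things are happening in the span above. Server side, HRegion.checkResources rejects the write with RegionTooBusyException because the region's memstore is over the blocking limit (32.0 K here; in HBase that blocking size is derived from the configured flush size multiplied by hbase.hregion.memstore.block.multiplier, so this test is clearly running with a deliberately tiny flush size). Client side, AsyncRegionLocatorHelper sees that the exception does not mean the region has moved, keeps the cached location, and simply retries later. A minimal client-side sketch of that handling is below; it is not the HBase client's internal retry logic, just an application-level loop against the public client API, and the table name, family, row and backoff numbers are taken from or invented for this test run:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutExample {

    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestLogRolling-testLogRolling"))) {

            Put put = new Put(Bytes.toBytes("row0062"));
            put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("value"));

            long backoffMs = 100; // illustrative backoff schedule
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);
                    break; // write accepted
                } catch (RegionTooBusyException e) {
                    // The region has not moved; its memstore is just over the
                    // blocking limit. Keep the cached location, pause so the
                    // flush can catch up, then retry.
                    if (attempt >= 5) {
                        throw e;
                    }
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
        }
    }
}
```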
2024-11-11T20:47:57,355 INFO [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/d4f60e91475341f1aecee2a45472baeb, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/1c31ce7a10844536bf454131691aec28, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/6c4d09e5b32648a5a01b2a7684334ac0] into tmpdir=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp, totalSize=99.2 K
2024-11-11T20:47:57,355 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] compactions.Compactor(225): Compacting d4f60e91475341f1aecee2a45472baeb, keycount=58, bloomtype=ROW, size=66.4 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1731358060983
2024-11-11T20:47:57,356 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1c31ce7a10844536bf454131691aec28, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1731358075255
2024-11-11T20:47:57,356 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6c4d09e5b32648a5a01b2a7684334ac0, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=189, earliestPutTs=1731358077283
2024-11-11T20:47:57,368 INFO [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2da7cb8257b39902dcb50ef5e5e3cc7#info#compaction#76 average throughput is 27.36 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
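The PressureAwareThroughputController line reports that this compaction averaged 27.36 MB/second against a 50 MB/second limit, so it never had to sleep; when a compaction writes faster than its allowance, the controller inserts sleeps to bring the average back under the limit. A simplified, self-contained sketch of that style of throughput throttle is below; it is only an illustration of the idea, not HBase's controller, and the chunk sizes in main are invented:

```java
/**
 * Simplified write-throughput throttle: a writer reports how many bytes it
 * produced, and the throttle sleeps just long enough to keep the average
 * rate under the limit (50 MB/s in this test run).
 */
public class ThroughputThrottle {

    private final double maxBytesPerSecond;
    private final long startNanos = System.nanoTime();
    private long bytesWritten;
    private int sleeps;

    public ThroughputThrottle(double maxBytesPerSecond) {
        this.maxBytesPerSecond = maxBytesPerSecond;
    }

    /** Call after writing a chunk; sleeps if we are ahead of the allowed rate. */
    public void control(long newBytes) throws InterruptedException {
        bytesWritten += newBytes;
        double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
        double earliestAllowedSec = bytesWritten / maxBytesPerSecond;
        if (earliestAllowedSec > elapsedSec) {
            sleeps++;
            Thread.sleep((long) ((earliestAllowedSec - elapsedSec) * 1000));
        }
    }

    public String summary() {
        double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
        double mbPerSec = bytesWritten / elapsedSec / (1024 * 1024);
        return String.format("average throughput is %.2f MB/second, slept %d time(s)", mbPerSec, sleeps);
    }

    public static void main(String[] args) throws InterruptedException {
        ThroughputThrottle throttle = new ThroughputThrottle(50 * 1024 * 1024);
        // Simulate writing roughly the ~99 KB this compaction handled,
        // far below the limit, so no sleeps are expected.
        for (int i = 0; i < 10; i++) {
            throttle.control(10_160);
        }
        System.out.println(throttle.summary());
    }
}
```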
2024-11-11T20:47:57,369 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/c7d2cc3bc30044fd88a783fcff8b4d4c is 1080, key is row0062/info:/1731358060983/Put/seqid=0
2024-11-11T20:47:57,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741863_1039 (size=91843)
2024-11-11T20:47:57,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741863_1039 (size=91843)
2024-11-11T20:47:57,386 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/c7d2cc3bc30044fd88a783fcff8b4d4c as hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/c7d2cc3bc30044fd88a783fcff8b4d4c
2024-11-11T20:47:57,394 INFO [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e2da7cb8257b39902dcb50ef5e5e3cc7/info of e2da7cb8257b39902dcb50ef5e5e3cc7 into c7d2cc3bc30044fd88a783fcff8b4d4c(size=89.7 K), total size for store is 89.7 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-11T20:47:57,394 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e2da7cb8257b39902dcb50ef5e5e3cc7:
2024-11-11T20:47:57,394 INFO [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7., storeName=e2da7cb8257b39902dcb50ef5e5e3cc7/info, priority=13, startTime=1731358077354; duration=0sec
2024-11-11T20:47:57,395 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-11T20:47:57,395 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2da7cb8257b39902dcb50ef5e5e3cc7:info
2024-11-11T20:47:57,846 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
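Both the flushes and this compaction follow the same commit pattern visible in the HRegionFileSystem lines: the new HFile is written under the region's .tmp directory first, and only once it is complete is it "committed" by moving it into the store directory (info/), so readers never observe a partially written file. A hedged sketch of that write-then-rename pattern against the plain Hadoop FileSystem API is below; the directory layout is an invented stand-in for the region paths in this log, and on real HDFS the single-file rename is what makes the commit effectively atomic:

```java
import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenCommitExample {

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Uses the local filesystem unless fs.defaultFS points at HDFS
        // (e.g. hdfs://localhost:41561 as in this test run).
        FileSystem fs = FileSystem.get(conf);

        Path storeDir = new Path("/tmp/demo-region/info");
        Path tmpDir = new Path("/tmp/demo-region/.tmp/info");
        fs.mkdirs(storeDir);
        fs.mkdirs(tmpDir);

        // 1. Write the new file somewhere readers never look.
        Path tmpFile = new Path(tmpDir, "example-hfile");
        try (FSDataOutputStream out = fs.create(tmpFile, true)) {
            out.write("fake hfile contents".getBytes(StandardCharsets.UTF_8));
        }

        // 2. "Commit" it with a rename into the store directory, mirroring
        //    HRegionFileSystem's "Committing .tmp/info/<file> as info/<file>".
        Path committed = new Path(storeDir, tmpFile.getName());
        if (!fs.rename(tmpFile, committed)) {
            throw new IOException("commit failed for " + tmpFile);
        }
        System.out.println("committed " + committed);
    }
}
```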
2024-11-11T20:47:57,949 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051
    [stack trace identical to the 20:47:55,947 entry above]
2024-11-11T20:47:57,949 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta
    [stack trace identical to the 20:47:55,947 entry above]
[The same pair of lease-recovery warnings, with identical stack traces, was logged by Close-WAL-Writer-0 for both WAL files roughly once per second at 20:47:58,949, 20:47:59,950, 20:48:00,951, 20:48:01,951, 20:48:02,952, 20:48:03,953, 20:48:04,953 and 20:48:05,954.]
2024-11-11T20:48:06,955 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051
    [stack trace identical to the 20:47:55,947 entry above]
2024-11-11T20:48:06,955 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta
    [stack trace identical to the 20:47:55,947 entry above]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:48:07,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45533 {}] regionserver.HRegion(8855): Flush requested on e2da7cb8257b39902dcb50ef5e5e3cc7 2024-11-11T20:48:07,381 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e2da7cb8257b39902dcb50ef5e5e3cc7 1/1 column families, dataSize=18.91 KB heapSize=20.50 KB 2024-11-11T20:48:07,387 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/4bcb0eb685f14492b9c90e584b312370 is 1080, key is row0142/info:/1731358077309/Put/seqid=0 2024-11-11T20:48:07,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741864_1040 (size=24394) 2024-11-11T20:48:07,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741864_1040 (size=24394) 2024-11-11T20:48:07,404 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=18.91 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/4bcb0eb685f14492b9c90e584b312370 2024-11-11T20:48:07,428 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/4bcb0eb685f14492b9c90e584b312370 as hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/4bcb0eb685f14492b9c90e584b312370 2024-11-11T20:48:07,440 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/4bcb0eb685f14492b9c90e584b312370, entries=18, sequenceid=211, filesize=23.8 K 2024-11-11T20:48:07,441 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~18.91 KB/19368, heapSize ~20.48 KB/20976, currentSize=1.05 KB/1076 for e2da7cb8257b39902dcb50ef5e5e3cc7 in 61ms, sequenceid=211, compaction requested=false 2024-11-11T20:48:07,441 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e2da7cb8257b39902dcb50ef5e5e3cc7: 2024-11-11T20:48:07,956 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at 
jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:48:07,956 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:48:08,756 INFO [master/51ca66f7ee3c:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-11T20:48:08,756 INFO [master/51ca66f7ee3c:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-11T20:48:08,957 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T20:48:08,957 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T20:48:09,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45533 {}] regionserver.HRegion(8855): Flush requested on e2da7cb8257b39902dcb50ef5e5e3cc7 2024-11-11T20:48:09,397 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e2da7cb8257b39902dcb50ef5e5e3cc7 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-11T20:48:09,400 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/f4fac091d9a343a4b892e6d035da56b0 is 1080, key is row0160/info:/1731358087382/Put/seqid=0 2024-11-11T20:48:09,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741865_1041 (size=12516) 2024-11-11T20:48:09,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741865_1041 (size=12516) 2024-11-11T20:48:09,405 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=221 (bloomFilter=true), to=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/f4fac091d9a343a4b892e6d035da56b0 2024-11-11T20:48:09,414 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/f4fac091d9a343a4b892e6d035da56b0 as hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/f4fac091d9a343a4b892e6d035da56b0 2024-11-11T20:48:09,425 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/f4fac091d9a343a4b892e6d035da56b0, entries=7, sequenceid=221, filesize=12.2 K 2024-11-11T20:48:09,426 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=8.41 KB/8608 for e2da7cb8257b39902dcb50ef5e5e3cc7 in 30ms, sequenceid=221, compaction requested=true 2024-11-11T20:48:09,426 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e2da7cb8257b39902dcb50ef5e5e3cc7: 2024-11-11T20:48:09,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2da7cb8257b39902dcb50ef5e5e3cc7:info, priority=-2147483648, current under compaction store size is 1 2024-11-11T20:48:09,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T20:48:09,427 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T20:48:09,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45533 {}] regionserver.HRegion(8855): Flush requested on e2da7cb8257b39902dcb50ef5e5e3cc7 2024-11-11T20:48:09,428 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2902): Flushing e2da7cb8257b39902dcb50ef5e5e3cc7 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-11-11T20:48:09,430 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 128753 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T20:48:09,430 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HStore(1541): e2da7cb8257b39902dcb50ef5e5e3cc7/info is initiating minor compaction (all files) 2024-11-11T20:48:09,430 INFO [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e2da7cb8257b39902dcb50ef5e5e3cc7/info in TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7. 2024-11-11T20:48:09,431 INFO [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/c7d2cc3bc30044fd88a783fcff8b4d4c, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/4bcb0eb685f14492b9c90e584b312370, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/f4fac091d9a343a4b892e6d035da56b0] into tmpdir=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp, totalSize=125.7 K 2024-11-11T20:48:09,431 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] compactions.Compactor(225): Compacting c7d2cc3bc30044fd88a783fcff8b4d4c, keycount=80, bloomtype=ROW, size=89.7 K, encoding=NONE, compression=NONE, seqNum=189, earliestPutTs=1731358060983 2024-11-11T20:48:09,432 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4bcb0eb685f14492b9c90e584b312370, keycount=18, bloomtype=ROW, size=23.8 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1731358077309 2024-11-11T20:48:09,432 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] compactions.Compactor(225): Compacting f4fac091d9a343a4b892e6d035da56b0, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1731358087382 2024-11-11T20:48:09,438 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/83b35c66d8774f30a33e5bdedc1d1486 is 1080, key is row0167/info:/1731358089398/Put/seqid=0 2024-11-11T20:48:09,461 INFO [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2da7cb8257b39902dcb50ef5e5e3cc7#info#compaction#80 average throughput is 53.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T20:48:09,461 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/353dfbbf1d4b4ce1b4baff7f821a366c is 1080, key is row0062/info:/1731358060983/Put/seqid=0 2024-11-11T20:48:09,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741866_1042 (size=14672) 2024-11-11T20:48:09,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741866_1042 (size=14672) 2024-11-11T20:48:09,479 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/83b35c66d8774f30a33e5bdedc1d1486 2024-11-11T20:48:09,486 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/83b35c66d8774f30a33e5bdedc1d1486 as hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/83b35c66d8774f30a33e5bdedc1d1486 2024-11-11T20:48:09,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741867_1043 (size=118899) 2024-11-11T20:48:09,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741867_1043 (size=118899) 2024-11-11T20:48:09,492 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/83b35c66d8774f30a33e5bdedc1d1486, entries=9, sequenceid=233, filesize=14.3 K 2024-11-11T20:48:09,493 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=17.86 KB/18292 for e2da7cb8257b39902dcb50ef5e5e3cc7 in 65ms, sequenceid=233, compaction requested=false 2024-11-11T20:48:09,493 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e2da7cb8257b39902dcb50ef5e5e3cc7: 2024-11-11T20:48:09,494 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/353dfbbf1d4b4ce1b4baff7f821a366c as hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/353dfbbf1d4b4ce1b4baff7f821a366c 2024-11-11T20:48:09,500 INFO [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e2da7cb8257b39902dcb50ef5e5e3cc7/info of e2da7cb8257b39902dcb50ef5e5e3cc7 into 
353dfbbf1d4b4ce1b4baff7f821a366c(size=116.1 K), total size for store is 130.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T20:48:09,500 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e2da7cb8257b39902dcb50ef5e5e3cc7: 2024-11-11T20:48:09,500 INFO [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7., storeName=e2da7cb8257b39902dcb50ef5e5e3cc7/info, priority=13, startTime=1731358089426; duration=0sec 2024-11-11T20:48:09,500 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T20:48:09,500 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2da7cb8257b39902dcb50ef5e5e3cc7:info 2024-11-11T20:48:09,957 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T20:48:09,958 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:48:10,958 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:48:10,958 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T20:48:11,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45533 {}] regionserver.HRegion(8855): Flush requested on e2da7cb8257b39902dcb50ef5e5e3cc7 2024-11-11T20:48:11,480 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e2da7cb8257b39902dcb50ef5e5e3cc7 1/1 column families, dataSize=18.91 KB heapSize=20.50 KB 2024-11-11T20:48:11,487 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/7c2705c360684051a7764b0034d6df63 is 1080, key is row0176/info:/1731358089430/Put/seqid=0 2024-11-11T20:48:11,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741868_1044 (size=24394) 2024-11-11T20:48:11,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741868_1044 (size=24394) 2024-11-11T20:48:11,494 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=18.91 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/7c2705c360684051a7764b0034d6df63 2024-11-11T20:48:11,500 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/7c2705c360684051a7764b0034d6df63 as hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/7c2705c360684051a7764b0034d6df63 2024-11-11T20:48:11,506 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/7c2705c360684051a7764b0034d6df63, entries=18, sequenceid=255, filesize=23.8 K 2024-11-11T20:48:11,507 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~18.91 KB/19368, heapSize ~20.48 KB/20976, currentSize=11.56 KB/11836 for e2da7cb8257b39902dcb50ef5e5e3cc7 in 27ms, sequenceid=255, compaction requested=true 2024-11-11T20:48:11,507 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e2da7cb8257b39902dcb50ef5e5e3cc7: 2024-11-11T20:48:11,507 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2da7cb8257b39902dcb50ef5e5e3cc7:info, priority=-2147483648, current under compaction store size is 1 2024-11-11T20:48:11,507 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T20:48:11,507 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T20:48:11,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45533 {}] regionserver.HRegion(8855): Flush requested on e2da7cb8257b39902dcb50ef5e5e3cc7 2024-11-11T20:48:11,508 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2902): Flushing e2da7cb8257b39902dcb50ef5e5e3cc7 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-11T20:48:11,509 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 157965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T20:48:11,509 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HStore(1541): e2da7cb8257b39902dcb50ef5e5e3cc7/info is initiating minor compaction (all files) 2024-11-11T20:48:11,509 INFO [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e2da7cb8257b39902dcb50ef5e5e3cc7/info in TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7. 2024-11-11T20:48:11,509 INFO [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/353dfbbf1d4b4ce1b4baff7f821a366c, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/83b35c66d8774f30a33e5bdedc1d1486, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/7c2705c360684051a7764b0034d6df63] into tmpdir=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp, totalSize=154.3 K 2024-11-11T20:48:11,510 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] compactions.Compactor(225): Compacting 353dfbbf1d4b4ce1b4baff7f821a366c, keycount=105, bloomtype=ROW, size=116.1 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1731358060983 2024-11-11T20:48:11,510 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] compactions.Compactor(225): Compacting 83b35c66d8774f30a33e5bdedc1d1486, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1731358089398 2024-11-11T20:48:11,510 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7c2705c360684051a7764b0034d6df63, keycount=18, bloomtype=ROW, size=23.8 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1731358089430 2024-11-11T20:48:11,513 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/4349d1bea5e940289561bd2b04950bee is 1080, key is row0194/info:/1731358091483/Put/seqid=0 2024-11-11T20:48:11,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741869_1045 (size=17918) 2024-11-11T20:48:11,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741869_1045 (size=17918) 2024-11-11T20:48:11,527 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=270 (bloomFilter=true), 
to=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/4349d1bea5e940289561bd2b04950bee 2024-11-11T20:48:11,529 INFO [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2da7cb8257b39902dcb50ef5e5e3cc7#info#compaction#83 average throughput is 45.15 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T20:48:11,529 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/15dc26d12ddd480fb6ddaf871cd902c2 is 1080, key is row0062/info:/1731358060983/Put/seqid=0 2024-11-11T20:48:11,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741870_1046 (size=148312) 2024-11-11T20:48:11,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741870_1046 (size=148312) 2024-11-11T20:48:11,534 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/4349d1bea5e940289561bd2b04950bee as hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/4349d1bea5e940289561bd2b04950bee 2024-11-11T20:48:11,539 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/4349d1bea5e940289561bd2b04950bee, entries=12, sequenceid=270, filesize=17.5 K 2024-11-11T20:48:11,540 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/15dc26d12ddd480fb6ddaf871cd902c2 as hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/15dc26d12ddd480fb6ddaf871cd902c2 2024-11-11T20:48:11,540 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=13.66 KB/13988 for e2da7cb8257b39902dcb50ef5e5e3cc7 in 33ms, sequenceid=270, compaction requested=false 2024-11-11T20:48:11,540 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e2da7cb8257b39902dcb50ef5e5e3cc7: 2024-11-11T20:48:11,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45533 {}] regionserver.HRegion(8855): Flush requested on e2da7cb8257b39902dcb50ef5e5e3cc7 2024-11-11T20:48:11,541 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e2da7cb8257b39902dcb50ef5e5e3cc7 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-11T20:48:11,545 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/12d16c317ddb498188df1c2268221588 is 1080, key is row0206/info:/1731358091509/Put/seqid=0 2024-11-11T20:48:11,546 INFO [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e2da7cb8257b39902dcb50ef5e5e3cc7/info of e2da7cb8257b39902dcb50ef5e5e3cc7 into 15dc26d12ddd480fb6ddaf871cd902c2(size=144.8 K), total size for store is 162.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T20:48:11,546 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e2da7cb8257b39902dcb50ef5e5e3cc7: 2024-11-11T20:48:11,546 INFO [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7., storeName=e2da7cb8257b39902dcb50ef5e5e3cc7/info, priority=13, startTime=1731358091507; duration=0sec 2024-11-11T20:48:11,546 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T20:48:11,546 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2da7cb8257b39902dcb50ef5e5e3cc7:info 2024-11-11T20:48:11,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741871_1047 (size=20092) 2024-11-11T20:48:11,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741871_1047 (size=20092) 2024-11-11T20:48:11,552 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/12d16c317ddb498188df1c2268221588 2024-11-11T20:48:11,558 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/12d16c317ddb498188df1c2268221588 as hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/12d16c317ddb498188df1c2268221588 2024-11-11T20:48:11,563 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/12d16c317ddb498188df1c2268221588, entries=14, sequenceid=287, filesize=19.6 K 2024-11-11T20:48:11,564 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=5.25 KB/5380 for e2da7cb8257b39902dcb50ef5e5e3cc7 in 23ms, sequenceid=287, compaction requested=true 2024-11-11T20:48:11,564 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e2da7cb8257b39902dcb50ef5e5e3cc7: 2024-11-11T20:48:11,564 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2da7cb8257b39902dcb50ef5e5e3cc7:info, priority=-2147483648, current under compaction store size is 1 2024-11-11T20:48:11,564 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T20:48:11,564 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T20:48:11,565 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 186322 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T20:48:11,565 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HStore(1541): e2da7cb8257b39902dcb50ef5e5e3cc7/info is initiating minor compaction (all files) 2024-11-11T20:48:11,565 INFO [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e2da7cb8257b39902dcb50ef5e5e3cc7/info in TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7. 2024-11-11T20:48:11,565 INFO [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/15dc26d12ddd480fb6ddaf871cd902c2, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/4349d1bea5e940289561bd2b04950bee, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/12d16c317ddb498188df1c2268221588] into tmpdir=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp, totalSize=182.0 K 2024-11-11T20:48:11,566 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] compactions.Compactor(225): Compacting 15dc26d12ddd480fb6ddaf871cd902c2, keycount=132, bloomtype=ROW, size=144.8 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1731358060983 2024-11-11T20:48:11,566 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4349d1bea5e940289561bd2b04950bee, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1731358091483 2024-11-11T20:48:11,566 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] compactions.Compactor(225): Compacting 12d16c317ddb498188df1c2268221588, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1731358091509 2024-11-11T20:48:11,577 INFO [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2da7cb8257b39902dcb50ef5e5e3cc7#info#compaction#85 average throughput is 54.04 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T20:48:11,577 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/d1d0517c56c7495eafe8dd5c0eec9c85 is 1080, key is row0062/info:/1731358060983/Put/seqid=0 2024-11-11T20:48:11,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741872_1048 (size=176476) 2024-11-11T20:48:11,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741872_1048 (size=176476) 2024-11-11T20:48:11,585 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/d1d0517c56c7495eafe8dd5c0eec9c85 as hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/d1d0517c56c7495eafe8dd5c0eec9c85 2024-11-11T20:48:11,591 INFO [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e2da7cb8257b39902dcb50ef5e5e3cc7/info of e2da7cb8257b39902dcb50ef5e5e3cc7 into d1d0517c56c7495eafe8dd5c0eec9c85(size=172.3 K), total size for store is 172.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T20:48:11,591 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e2da7cb8257b39902dcb50ef5e5e3cc7: 2024-11-11T20:48:11,591 INFO [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7., storeName=e2da7cb8257b39902dcb50ef5e5e3cc7/info, priority=13, startTime=1731358091564; duration=0sec 2024-11-11T20:48:11,592 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T20:48:11,592 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2da7cb8257b39902dcb50ef5e5e3cc7:info 2024-11-11T20:48:11,959 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:48:11,959 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:48:12,960 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:48:12,960 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:48:13,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45533 {}] regionserver.HRegion(8855): Flush requested on e2da7cb8257b39902dcb50ef5e5e3cc7 2024-11-11T20:48:13,557 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e2da7cb8257b39902dcb50ef5e5e3cc7 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-11T20:48:13,561 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/e46c17eef8224659ab2e5b059ac20e41 is 1080, key is row0220/info:/1731358091542/Put/seqid=0 2024-11-11T20:48:13,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741873_1049 (size=12523) 2024-11-11T20:48:13,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741873_1049 (size=12523) 2024-11-11T20:48:13,569 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=299 (bloomFilter=true), to=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/e46c17eef8224659ab2e5b059ac20e41 2024-11-11T20:48:13,575 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/e46c17eef8224659ab2e5b059ac20e41 as 
hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/e46c17eef8224659ab2e5b059ac20e41 2024-11-11T20:48:13,580 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/e46c17eef8224659ab2e5b059ac20e41, entries=7, sequenceid=299, filesize=12.2 K 2024-11-11T20:48:13,581 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for e2da7cb8257b39902dcb50ef5e5e3cc7 in 24ms, sequenceid=299, compaction requested=false 2024-11-11T20:48:13,581 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e2da7cb8257b39902dcb50ef5e5e3cc7: 2024-11-11T20:48:13,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45533 {}] regionserver.HRegion(8855): Flush requested on e2da7cb8257b39902dcb50ef5e5e3cc7 2024-11-11T20:48:13,581 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e2da7cb8257b39902dcb50ef5e5e3cc7 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-11T20:48:13,585 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/ff94ca7b48284de1a2ca461f86b87a2b is 1080, key is row0227/info:/1731358093558/Put/seqid=0 2024-11-11T20:48:13,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741874_1050 (size=17918) 2024-11-11T20:48:13,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741874_1050 (size=17918) 2024-11-11T20:48:13,590 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=314 (bloomFilter=true), to=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/ff94ca7b48284de1a2ca461f86b87a2b 2024-11-11T20:48:13,595 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/ff94ca7b48284de1a2ca461f86b87a2b as hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/ff94ca7b48284de1a2ca461f86b87a2b 2024-11-11T20:48:13,600 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/ff94ca7b48284de1a2ca461f86b87a2b, entries=12, sequenceid=314, filesize=17.5 K 2024-11-11T20:48:13,601 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=10.51 KB/10760 for e2da7cb8257b39902dcb50ef5e5e3cc7 in 20ms, sequenceid=314, compaction requested=true 2024-11-11T20:48:13,601 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for e2da7cb8257b39902dcb50ef5e5e3cc7: 2024-11-11T20:48:13,601 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2da7cb8257b39902dcb50ef5e5e3cc7:info, priority=-2147483648, current under compaction store size is 1 2024-11-11T20:48:13,601 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T20:48:13,601 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T20:48:13,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45533 {}] regionserver.HRegion(8855): Flush requested on e2da7cb8257b39902dcb50ef5e5e3cc7 2024-11-11T20:48:13,602 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e2da7cb8257b39902dcb50ef5e5e3cc7 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-11T20:48:13,602 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 206917 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T20:48:13,602 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HStore(1541): e2da7cb8257b39902dcb50ef5e5e3cc7/info is initiating minor compaction (all files) 2024-11-11T20:48:13,602 INFO [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e2da7cb8257b39902dcb50ef5e5e3cc7/info in TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7. 2024-11-11T20:48:13,603 INFO [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/d1d0517c56c7495eafe8dd5c0eec9c85, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/e46c17eef8224659ab2e5b059ac20e41, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/ff94ca7b48284de1a2ca461f86b87a2b] into tmpdir=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp, totalSize=202.1 K 2024-11-11T20:48:13,603 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] compactions.Compactor(225): Compacting d1d0517c56c7495eafe8dd5c0eec9c85, keycount=158, bloomtype=ROW, size=172.3 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1731358060983 2024-11-11T20:48:13,603 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] compactions.Compactor(225): Compacting e46c17eef8224659ab2e5b059ac20e41, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1731358091542 2024-11-11T20:48:13,604 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] compactions.Compactor(225): Compacting ff94ca7b48284de1a2ca461f86b87a2b, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1731358093558 2024-11-11T20:48:13,605 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/a86687e571b94e5db0f15bea38e16c28 is 1080, key is row0239/info:/1731358093582/Put/seqid=0 2024-11-11T20:48:13,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741875_1051 (size=16839) 2024-11-11T20:48:13,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741875_1051 (size=16839) 2024-11-11T20:48:13,611 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=328 (bloomFilter=true), to=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/a86687e571b94e5db0f15bea38e16c28 2024-11-11T20:48:13,618 INFO [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2da7cb8257b39902dcb50ef5e5e3cc7#info#compaction#89 average throughput is 60.54 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T20:48:13,618 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/40adb1ebe34a4848ad52d736cdb61bc9 is 1080, key is row0062/info:/1731358060983/Put/seqid=0 2024-11-11T20:48:13,622 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/a86687e571b94e5db0f15bea38e16c28 as hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/a86687e571b94e5db0f15bea38e16c28 2024-11-11T20:48:13,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741876_1052 (size=197083) 2024-11-11T20:48:13,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741876_1052 (size=197083) 2024-11-11T20:48:13,627 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/a86687e571b94e5db0f15bea38e16c28, entries=11, sequenceid=328, filesize=16.4 K 2024-11-11T20:48:13,628 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/40adb1ebe34a4848ad52d736cdb61bc9 as hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/40adb1ebe34a4848ad52d736cdb61bc9 2024-11-11T20:48:13,628 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=7.36 KB/7532 for e2da7cb8257b39902dcb50ef5e5e3cc7 in 26ms, sequenceid=328, compaction requested=false 2024-11-11T20:48:13,628 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e2da7cb8257b39902dcb50ef5e5e3cc7: 2024-11-11T20:48:13,634 INFO [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e2da7cb8257b39902dcb50ef5e5e3cc7/info of e2da7cb8257b39902dcb50ef5e5e3cc7 into 40adb1ebe34a4848ad52d736cdb61bc9(size=192.5 K), total size for store is 208.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T20:48:13,634 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e2da7cb8257b39902dcb50ef5e5e3cc7: 2024-11-11T20:48:13,634 INFO [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7., storeName=e2da7cb8257b39902dcb50ef5e5e3cc7/info, priority=13, startTime=1731358093601; duration=0sec 2024-11-11T20:48:13,634 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T20:48:13,634 DEBUG [RS:0;51ca66f7ee3c:45533-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2da7cb8257b39902dcb50ef5e5e3cc7:info 2024-11-11T20:48:13,669 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20340 2024-11-11T20:48:13,961 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:48:13,961 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:48:14,962 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:48:14,962 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:48:15,619 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-11T20:48:15,620 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 51ca66f7ee3c%2C45533%2C1731358047907.1731358095620 2024-11-11T20:48:15,627 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:48:15,627 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:48:15,627 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:48:15,627 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:48:15,627 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:48:15,628 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/WALs/51ca66f7ee3c,45533,1731358047907/51ca66f7ee3c%2C45533%2C1731358047907.1731358048291 with entries=315, filesize=309.33 KB; new WAL /user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/WALs/51ca66f7ee3c,45533,1731358047907/51ca66f7ee3c%2C45533%2C1731358047907.1731358095620 2024-11-11T20:48:15,629 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35391:35391),(127.0.0.1/127.0.0.1:33773:33773)] 2024-11-11T20:48:15,630 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/WALs/51ca66f7ee3c,45533,1731358047907/51ca66f7ee3c%2C45533%2C1731358047907.1731358048291 is not closed yet, will try archiving it next time 2024-11-11T20:48:15,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741833_1009 (size=316763) 2024-11-11T20:48:15,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741833_1009 (size=316763) 2024-11-11T20:48:15,634 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing e2da7cb8257b39902dcb50ef5e5e3cc7 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-11T20:48:15,642 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/6aa82e4f696d4ebaa3f85b20b49b2827 is 1080, key is row0250/info:/1731358093603/Put/seqid=0 2024-11-11T20:48:15,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741878_1054 (size=12523) 2024-11-11T20:48:15,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741878_1054 (size=12523) 2024-11-11T20:48:15,653 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=339 (bloomFilter=true), 
to=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/6aa82e4f696d4ebaa3f85b20b49b2827 2024-11-11T20:48:15,659 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/.tmp/info/6aa82e4f696d4ebaa3f85b20b49b2827 as hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/6aa82e4f696d4ebaa3f85b20b49b2827 2024-11-11T20:48:15,666 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/6aa82e4f696d4ebaa3f85b20b49b2827, entries=7, sequenceid=339, filesize=12.2 K 2024-11-11T20:48:15,668 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for e2da7cb8257b39902dcb50ef5e5e3cc7 in 34ms, sequenceid=339, compaction requested=true 2024-11-11T20:48:15,668 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for e2da7cb8257b39902dcb50ef5e5e3cc7: 2024-11-11T20:48:15,668 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=705 B heapSize=2.05 KB 2024-11-11T20:48:15,673 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/hbase/meta/1588230740/.tmp/info/5b907346bdfa4d25b0fc50885cb93d56 is 193, key is TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7./info:regioninfo/1731358063807/Put/seqid=0 2024-11-11T20:48:15,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741879_1055 (size=6223) 2024-11-11T20:48:15,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741879_1055 (size=6223) 2024-11-11T20:48:15,683 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=705 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/hbase/meta/1588230740/.tmp/info/5b907346bdfa4d25b0fc50885cb93d56 2024-11-11T20:48:15,689 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/hbase/meta/1588230740/.tmp/info/5b907346bdfa4d25b0fc50885cb93d56 as hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/hbase/meta/1588230740/info/5b907346bdfa4d25b0fc50885cb93d56 2024-11-11T20:48:15,697 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/hbase/meta/1588230740/info/5b907346bdfa4d25b0fc50885cb93d56, entries=5, sequenceid=21, filesize=6.1 K 2024-11-11T20:48:15,699 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~705 B/705, heapSize ~1.29 KB/1320, currentSize=0 B/0 for 1588230740 in 31ms, 
sequenceid=21, compaction requested=false 2024-11-11T20:48:15,699 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-11T20:48:15,699 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 17b383949ba3671cbe9bcb0fc4722887: 2024-11-11T20:48:15,700 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 51ca66f7ee3c%2C45533%2C1731358047907.1731358095699 2024-11-11T20:48:15,722 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:48:15,723 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:48:15,723 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:48:15,723 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:48:15,723 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:48:15,723 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/WALs/51ca66f7ee3c,45533,1731358047907/51ca66f7ee3c%2C45533%2C1731358047907.1731358095620 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/WALs/51ca66f7ee3c,45533,1731358047907/51ca66f7ee3c%2C45533%2C1731358047907.1731358095699 2024-11-11T20:48:15,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741877_1053 (size=731) 2024-11-11T20:48:15,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741877_1053 (size=731) 2024-11-11T20:48:15,729 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/WALs/51ca66f7ee3c,45533,1731358047907/51ca66f7ee3c%2C45533%2C1731358047907.1731358048291 to hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/oldWALs/51ca66f7ee3c%2C45533%2C1731358047907.1731358048291 2024-11-11T20:48:15,731 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/WALs/51ca66f7ee3c,45533,1731358047907/51ca66f7ee3c%2C45533%2C1731358047907.1731358095620 to hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/oldWALs/51ca66f7ee3c%2C45533%2C1731358047907.1731358095620 2024-11-11T20:48:15,733 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33773:33773),(127.0.0.1/127.0.0.1:35391:35391)] 2024-11-11T20:48:15,734 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-11-11T20:48:15,734 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-11T20:48:15,734 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-11T20:48:15,734 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T20:48:15,735 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T20:48:15,735 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T20:48:15,735 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-11T20:48:15,735 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-11T20:48:15,735 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1101031398, stopped=false 2024-11-11T20:48:15,735 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=51ca66f7ee3c,36227,1731358047866 2024-11-11T20:48:15,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45533-0x100308a9ad70001, quorum=127.0.0.1:63469, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T20:48:15,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45533-0x100308a9ad70001, quorum=127.0.0.1:63469, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:48:15,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36227-0x100308a9ad70000, quorum=127.0.0.1:63469, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T20:48:15,736 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T20:48:15,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36227-0x100308a9ad70000, quorum=127.0.0.1:63469, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:48:15,737 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-11T20:48:15,737 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T20:48:15,737 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T20:48:15,737 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36227-0x100308a9ad70000, quorum=127.0.0.1:63469, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T20:48:15,737 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45533-0x100308a9ad70001, quorum=127.0.0.1:63469, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T20:48:15,737 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '51ca66f7ee3c,45533,1731358047907' ***** 2024-11-11T20:48:15,737 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-11T20:48:15,738 INFO [RS:0;51ca66f7ee3c:45533 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T20:48:15,738 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-11T20:48:15,738 INFO [RS:0;51ca66f7ee3c:45533 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-11T20:48:15,738 INFO [RS:0;51ca66f7ee3c:45533 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-11T20:48:15,738 INFO [RS:0;51ca66f7ee3c:45533 {}] regionserver.HRegionServer(3091): Received CLOSE for e2da7cb8257b39902dcb50ef5e5e3cc7 2024-11-11T20:48:15,738 INFO [RS:0;51ca66f7ee3c:45533 {}] regionserver.HRegionServer(3091): Received CLOSE for 17b383949ba3671cbe9bcb0fc4722887 2024-11-11T20:48:15,738 INFO [RS:0;51ca66f7ee3c:45533 {}] regionserver.HRegionServer(959): stopping server 51ca66f7ee3c,45533,1731358047907 2024-11-11T20:48:15,738 INFO [RS:0;51ca66f7ee3c:45533 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T20:48:15,738 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing e2da7cb8257b39902dcb50ef5e5e3cc7, disabling compactions & flushes 2024-11-11T20:48:15,738 INFO [RS:0;51ca66f7ee3c:45533 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;51ca66f7ee3c:45533. 2024-11-11T20:48:15,738 INFO [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7. 2024-11-11T20:48:15,738 DEBUG [RS:0;51ca66f7ee3c:45533 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T20:48:15,738 DEBUG [RS:0;51ca66f7ee3c:45533 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T20:48:15,738 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7. 2024-11-11T20:48:15,739 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7. after waiting 1 ms 2024-11-11T20:48:15,739 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7. 
2024-11-11T20:48:15,739 INFO [RS:0;51ca66f7ee3c:45533 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-11T20:48:15,739 INFO [RS:0;51ca66f7ee3c:45533 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-11T20:48:15,739 INFO [RS:0;51ca66f7ee3c:45533 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-11T20:48:15,739 INFO [RS:0;51ca66f7ee3c:45533 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-11T20:48:15,741 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/60d2284b6f554f8eb691c71dd5237733.31e28af4302d239e8d29f4d067b98d43->hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/60d2284b6f554f8eb691c71dd5237733-top, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/TestLogRolling-testLogRolling=31e28af4302d239e8d29f4d067b98d43-ab7f0a443e19411a9237bb375e5c07e3, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/e6d6e8e865904ff09ce6ecddd224b8d0, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/TestLogRolling-testLogRolling=31e28af4302d239e8d29f4d067b98d43-58fda7b7050e4ce2b8bc3371bf4332f5, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/b2dcaaa7783845b4af9a9ff5d84a4deb, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/d4f60e91475341f1aecee2a45472baeb, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/82dc47634071481cb875aad797372453, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/1c31ce7a10844536bf454131691aec28, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/c7d2cc3bc30044fd88a783fcff8b4d4c, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/6c4d09e5b32648a5a01b2a7684334ac0, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/4bcb0eb685f14492b9c90e584b312370, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/353dfbbf1d4b4ce1b4baff7f821a366c, 
hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/f4fac091d9a343a4b892e6d035da56b0, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/83b35c66d8774f30a33e5bdedc1d1486, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/15dc26d12ddd480fb6ddaf871cd902c2, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/7c2705c360684051a7764b0034d6df63, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/4349d1bea5e940289561bd2b04950bee, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/d1d0517c56c7495eafe8dd5c0eec9c85, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/12d16c317ddb498188df1c2268221588, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/e46c17eef8224659ab2e5b059ac20e41, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/ff94ca7b48284de1a2ca461f86b87a2b] to archive 2024-11-11T20:48:15,743 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-11T20:48:15,744 INFO [RS:0;51ca66f7ee3c:45533 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-11T20:48:15,744 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/60d2284b6f554f8eb691c71dd5237733.31e28af4302d239e8d29f4d067b98d43 to hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/archive/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/60d2284b6f554f8eb691c71dd5237733.31e28af4302d239e8d29f4d067b98d43 2024-11-11T20:48:15,745 DEBUG [RS:0;51ca66f7ee3c:45533 {}] regionserver.HRegionServer(1325): Online Regions={e2da7cb8257b39902dcb50ef5e5e3cc7=TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7., 1588230740=hbase:meta,,1.1588230740, 17b383949ba3671cbe9bcb0fc4722887=TestLogRolling-testLogRolling,,1731358063089.17b383949ba3671cbe9bcb0fc4722887.} 2024-11-11T20:48:15,745 DEBUG [RS:0;51ca66f7ee3c:45533 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 17b383949ba3671cbe9bcb0fc4722887, e2da7cb8257b39902dcb50ef5e5e3cc7 2024-11-11T20:48:15,745 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T20:48:15,745 INFO [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T20:48:15,745 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T20:48:15,745 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T20:48:15,745 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T20:48:15,746 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/TestLogRolling-testLogRolling=31e28af4302d239e8d29f4d067b98d43-ab7f0a443e19411a9237bb375e5c07e3 to hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/archive/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/TestLogRolling-testLogRolling=31e28af4302d239e8d29f4d067b98d43-ab7f0a443e19411a9237bb375e5c07e3 2024-11-11T20:48:15,748 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/e6d6e8e865904ff09ce6ecddd224b8d0 to 
hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/archive/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/e6d6e8e865904ff09ce6ecddd224b8d0 2024-11-11T20:48:15,750 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/TestLogRolling-testLogRolling=31e28af4302d239e8d29f4d067b98d43-58fda7b7050e4ce2b8bc3371bf4332f5 to hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/archive/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/TestLogRolling-testLogRolling=31e28af4302d239e8d29f4d067b98d43-58fda7b7050e4ce2b8bc3371bf4332f5 2024-11-11T20:48:15,752 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/b2dcaaa7783845b4af9a9ff5d84a4deb to hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/archive/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/b2dcaaa7783845b4af9a9ff5d84a4deb 2024-11-11T20:48:15,754 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/d4f60e91475341f1aecee2a45472baeb to hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/archive/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/d4f60e91475341f1aecee2a45472baeb 2024-11-11T20:48:15,758 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/82dc47634071481cb875aad797372453 to hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/archive/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/82dc47634071481cb875aad797372453 2024-11-11T20:48:15,758 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-11T20:48:15,759 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T20:48:15,759 INFO [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T20:48:15,759 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region 
close journal for 1588230740: Waiting for close lock at 1731358095745Running coprocessor pre-close hooks at 1731358095745Disabling compacts and flushes for region at 1731358095745Disabling writes for close at 1731358095745Writing region close event to WAL at 1731358095750 (+5 ms)Running coprocessor post-close hooks at 1731358095759 (+9 ms)Closed at 1731358095759 2024-11-11T20:48:15,759 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/1c31ce7a10844536bf454131691aec28 to hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/archive/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/1c31ce7a10844536bf454131691aec28 2024-11-11T20:48:15,759 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-11T20:48:15,760 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/c7d2cc3bc30044fd88a783fcff8b4d4c to hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/archive/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/c7d2cc3bc30044fd88a783fcff8b4d4c 2024-11-11T20:48:15,761 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/6c4d09e5b32648a5a01b2a7684334ac0 to hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/archive/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/6c4d09e5b32648a5a01b2a7684334ac0 2024-11-11T20:48:15,763 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/4bcb0eb685f14492b9c90e584b312370 to hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/archive/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/4bcb0eb685f14492b9c90e584b312370 2024-11-11T20:48:15,764 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/353dfbbf1d4b4ce1b4baff7f821a366c to hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/archive/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/353dfbbf1d4b4ce1b4baff7f821a366c 
2024-11-11T20:48:15,765 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/f4fac091d9a343a4b892e6d035da56b0 to hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/archive/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/f4fac091d9a343a4b892e6d035da56b0 2024-11-11T20:48:15,766 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/83b35c66d8774f30a33e5bdedc1d1486 to hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/archive/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/83b35c66d8774f30a33e5bdedc1d1486 2024-11-11T20:48:15,768 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/15dc26d12ddd480fb6ddaf871cd902c2 to hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/archive/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/15dc26d12ddd480fb6ddaf871cd902c2 2024-11-11T20:48:15,769 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/7c2705c360684051a7764b0034d6df63 to hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/archive/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/7c2705c360684051a7764b0034d6df63 2024-11-11T20:48:15,770 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/4349d1bea5e940289561bd2b04950bee to hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/archive/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/4349d1bea5e940289561bd2b04950bee 2024-11-11T20:48:15,771 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/d1d0517c56c7495eafe8dd5c0eec9c85 to 
hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/archive/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/d1d0517c56c7495eafe8dd5c0eec9c85 2024-11-11T20:48:15,772 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/12d16c317ddb498188df1c2268221588 to hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/archive/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/12d16c317ddb498188df1c2268221588 2024-11-11T20:48:15,773 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/e46c17eef8224659ab2e5b059ac20e41 to hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/archive/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/e46c17eef8224659ab2e5b059ac20e41 2024-11-11T20:48:15,774 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/ff94ca7b48284de1a2ca461f86b87a2b to hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/archive/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/info/ff94ca7b48284de1a2ca461f86b87a2b 2024-11-11T20:48:15,775 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=51ca66f7ee3c:36227 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-11T20:48:15,775 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [e6d6e8e865904ff09ce6ecddd224b8d0=42984, b2dcaaa7783845b4af9a9ff5d84a4deb=12516, d4f60e91475341f1aecee2a45472baeb=67948, 82dc47634071481cb875aad797372453=22238, 1c31ce7a10844536bf454131691aec28=15750, c7d2cc3bc30044fd88a783fcff8b4d4c=91843, 6c4d09e5b32648a5a01b2a7684334ac0=17906, 4bcb0eb685f14492b9c90e584b312370=24394, 353dfbbf1d4b4ce1b4baff7f821a366c=118899, f4fac091d9a343a4b892e6d035da56b0=12516, 83b35c66d8774f30a33e5bdedc1d1486=14672, 15dc26d12ddd480fb6ddaf871cd902c2=148312, 7c2705c360684051a7764b0034d6df63=24394, 4349d1bea5e940289561bd2b04950bee=17918, d1d0517c56c7495eafe8dd5c0eec9c85=176476, 12d16c317ddb498188df1c2268221588=20092, e46c17eef8224659ab2e5b059ac20e41=12523, ff94ca7b48284de1a2ca461f86b87a2b=17918] 2024-11-11T20:48:15,791 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/e2da7cb8257b39902dcb50ef5e5e3cc7/recovered.edits/342.seqid, newMaxSeqId=342, maxSeqId=130 2024-11-11T20:48:15,791 INFO [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7. 2024-11-11T20:48:15,792 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for e2da7cb8257b39902dcb50ef5e5e3cc7: Waiting for close lock at 1731358095738Running coprocessor pre-close hooks at 1731358095738Disabling compacts and flushes for region at 1731358095738Disabling writes for close at 1731358095739 (+1 ms)Writing region close event to WAL at 1731358095780 (+41 ms)Running coprocessor post-close hooks at 1731358095791 (+11 ms)Closed at 1731358095791 2024-11-11T20:48:15,792 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1731358063089.e2da7cb8257b39902dcb50ef5e5e3cc7. 2024-11-11T20:48:15,792 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 17b383949ba3671cbe9bcb0fc4722887, disabling compactions & flushes 2024-11-11T20:48:15,792 INFO [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731358063089.17b383949ba3671cbe9bcb0fc4722887. 2024-11-11T20:48:15,792 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731358063089.17b383949ba3671cbe9bcb0fc4722887. 2024-11-11T20:48:15,792 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731358063089.17b383949ba3671cbe9bcb0fc4722887. after waiting 0 ms 2024-11-11T20:48:15,792 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731358063089.17b383949ba3671cbe9bcb0fc4722887. 
2024-11-11T20:48:15,793 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731358063089.17b383949ba3671cbe9bcb0fc4722887.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/17b383949ba3671cbe9bcb0fc4722887/info/60d2284b6f554f8eb691c71dd5237733.31e28af4302d239e8d29f4d067b98d43->hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/31e28af4302d239e8d29f4d067b98d43/info/60d2284b6f554f8eb691c71dd5237733-bottom] to archive 2024-11-11T20:48:15,794 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731358063089.17b383949ba3671cbe9bcb0fc4722887.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-11T20:48:15,797 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731358063089.17b383949ba3671cbe9bcb0fc4722887.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/17b383949ba3671cbe9bcb0fc4722887/info/60d2284b6f554f8eb691c71dd5237733.31e28af4302d239e8d29f4d067b98d43 to hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/archive/data/default/TestLogRolling-testLogRolling/17b383949ba3671cbe9bcb0fc4722887/info/60d2284b6f554f8eb691c71dd5237733.31e28af4302d239e8d29f4d067b98d43 2024-11-11T20:48:15,797 WARN [StoreCloser-TestLogRolling-testLogRolling,,1731358063089.17b383949ba3671cbe9bcb0fc4722887.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-11T20:48:15,801 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/data/default/TestLogRolling-testLogRolling/17b383949ba3671cbe9bcb0fc4722887/recovered.edits/135.seqid, newMaxSeqId=135, maxSeqId=130 2024-11-11T20:48:15,802 INFO [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731358063089.17b383949ba3671cbe9bcb0fc4722887. 2024-11-11T20:48:15,802 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 17b383949ba3671cbe9bcb0fc4722887: Waiting for close lock at 1731358095792Running coprocessor pre-close hooks at 1731358095792Disabling compacts and flushes for region at 1731358095792Disabling writes for close at 1731358095792Writing region close event to WAL at 1731358095797 (+5 ms)Running coprocessor post-close hooks at 1731358095802 (+5 ms)Closed at 1731358095802 2024-11-11T20:48:15,802 DEBUG [RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1731358063089.17b383949ba3671cbe9bcb0fc4722887. 2024-11-11T20:48:15,945 INFO [RS:0;51ca66f7ee3c:45533 {}] regionserver.HRegionServer(976): stopping server 51ca66f7ee3c,45533,1731358047907; all regions closed. 
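The HFileArchiver lines above move each compacted store file from the table's data directory to the same relative location under the cluster's archive directory. The following is a small sketch of that path mapping using Hadoop's Path type; the helper name and the assumption that only the root prefix changes are illustrative, not HBase's own API.

```java
// Sketch of the data -> archive path mapping visible in the HFileArchiver log
// lines above; toArchive() is a hypothetical helper, not an HBase method.
import org.apache.hadoop.fs.Path;

public class ArchivePathSketch {
  // e.g. <root>/data/default/<table>/<region>/info/<hfile>
  //   -> <root>/archive/data/default/<table>/<region>/info/<hfile>
  static Path toArchive(Path rootDir, Path storeFile) {
    String relative = storeFile.toString().substring(rootDir.toString().length() + 1);
    return new Path(new Path(rootDir, "archive"), relative);
  }

  public static void main(String[] args) {
    // Placeholder paths for illustration only.
    Path root = new Path("hdfs://localhost:41561/user/jenkins/test-data/root");
    Path file = new Path(root, "data/default/TestTable/region1/info/hfile1");
    System.out.println(toArchive(root, file)); // .../root/archive/data/default/...
  }
}
```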
2024-11-11T20:48:15,945 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:48:15,946 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:48:15,946 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:48:15,946 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:48:15,946 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:48:15,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741834_1010 (size=8107) 2024-11-11T20:48:15,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741834_1010 (size=8107) 2024-11-11T20:48:15,950 DEBUG [RS:0;51ca66f7ee3c:45533 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/oldWALs 2024-11-11T20:48:15,950 INFO [RS:0;51ca66f7ee3c:45533 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 51ca66f7ee3c%2C45533%2C1731358047907.meta:.meta(num 1731358048661) 2024-11-11T20:48:15,950 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:48:15,951 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:48:15,951 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:48:15,951 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:48:15,951 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:48:15,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741880_1056 (size=780) 2024-11-11T20:48:15,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741880_1056 (size=780) 2024-11-11T20:48:15,954 DEBUG [RS:0;51ca66f7ee3c:45533 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/oldWALs 2024-11-11T20:48:15,954 INFO [RS:0;51ca66f7ee3c:45533 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 51ca66f7ee3c%2C45533%2C1731358047907:(num 1731358095699) 2024-11-11T20:48:15,954 DEBUG [RS:0;51ca66f7ee3c:45533 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T20:48:15,954 INFO [RS:0;51ca66f7ee3c:45533 {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T20:48:15,955 INFO [RS:0;51ca66f7ee3c:45533 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T20:48:15,955 INFO [RS:0;51ca66f7ee3c:45533 {}] hbase.ChoreService(370): Chore service for: regionserver/51ca66f7ee3c:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-11T20:48:15,955 INFO [RS:0;51ca66f7ee3c:45533 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T20:48:15,955 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
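The AbstractFSWAL lines above close the server's WALs and move them to the oldWALs directory once no region still needs their edits. As a hedged sketch, this is the kind of configuration a log-rolling test tightens so that rolling and archival happen quickly; the keys are standard HBase settings, but the values are illustrative assumptions, not the ones this run used.

```java
// Hedged configuration sketch: standard HBase keys with illustrative values
// chosen to make WALs roll quickly in a test; not taken from this run.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalRollConfigSketch {
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    // Roll WALs on a short timer instead of the default one hour.
    conf.setLong("hbase.regionserver.logroll.period", 10_000L);
    // Keep only a few WAL files so rolling forces flushes and archival sooner.
    conf.setInt("hbase.regionserver.maxlogs", 4);
    return conf;
  }
}
```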
2024-11-11T20:48:15,955 INFO [RS:0;51ca66f7ee3c:45533 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45533 2024-11-11T20:48:15,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36227-0x100308a9ad70000, quorum=127.0.0.1:63469, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T20:48:15,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45533-0x100308a9ad70001, quorum=127.0.0.1:63469, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/51ca66f7ee3c,45533,1731358047907 2024-11-11T20:48:15,956 INFO [RS:0;51ca66f7ee3c:45533 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T20:48:15,957 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [51ca66f7ee3c,45533,1731358047907] 2024-11-11T20:48:15,958 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/51ca66f7ee3c,45533,1731358047907 already deleted, retry=false 2024-11-11T20:48:15,958 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 51ca66f7ee3c,45533,1731358047907 expired; onlineServers=0 2024-11-11T20:48:15,958 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '51ca66f7ee3c,36227,1731358047866' ***** 2024-11-11T20:48:15,958 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-11T20:48:15,958 INFO [M:0;51ca66f7ee3c:36227 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T20:48:15,958 INFO [M:0;51ca66f7ee3c:36227 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T20:48:15,958 DEBUG [M:0;51ca66f7ee3c:36227 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-11T20:48:15,958 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-11T20:48:15,958 DEBUG [M:0;51ca66f7ee3c:36227 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-11T20:48:15,958 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster-HFileCleaner.large.0-1731358048044 {}] cleaner.HFileCleaner(306): Exit Thread[master/51ca66f7ee3c:0:becomeActiveMaster-HFileCleaner.large.0-1731358048044,5,FailOnTimeoutGroup] 2024-11-11T20:48:15,958 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster-HFileCleaner.small.0-1731358048045 {}] cleaner.HFileCleaner(306): Exit Thread[master/51ca66f7ee3c:0:becomeActiveMaster-HFileCleaner.small.0-1731358048045,5,FailOnTimeoutGroup] 2024-11-11T20:48:15,958 INFO [M:0;51ca66f7ee3c:36227 {}] hbase.ChoreService(370): Chore service for: master/51ca66f7ee3c:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-11T20:48:15,958 INFO [M:0;51ca66f7ee3c:36227 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T20:48:15,958 DEBUG [M:0;51ca66f7ee3c:36227 {}] master.HMaster(1795): Stopping service threads 2024-11-11T20:48:15,958 INFO [M:0;51ca66f7ee3c:36227 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-11T20:48:15,958 INFO [M:0;51ca66f7ee3c:36227 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T20:48:15,959 INFO [M:0;51ca66f7ee3c:36227 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-11T20:48:15,959 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-11T20:48:15,959 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36227-0x100308a9ad70000, quorum=127.0.0.1:63469, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-11T20:48:15,959 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36227-0x100308a9ad70000, quorum=127.0.0.1:63469, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:48:15,959 DEBUG [M:0;51ca66f7ee3c:36227 {}] zookeeper.ZKUtil(347): master:36227-0x100308a9ad70000, quorum=127.0.0.1:63469, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-11T20:48:15,959 WARN [M:0;51ca66f7ee3c:36227 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-11T20:48:15,960 INFO [M:0;51ca66f7ee3c:36227 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/.lastflushedseqids 2024-11-11T20:48:15,963 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:48:15,963 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:48:15,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741881_1057 (size=228) 2024-11-11T20:48:15,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741881_1057 (size=228) 2024-11-11T20:48:15,966 INFO [M:0;51ca66f7ee3c:36227 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-11T20:48:15,966 INFO [M:0;51ca66f7ee3c:36227 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-11T20:48:15,966 DEBUG [M:0;51ca66f7ee3c:36227 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T20:48:15,966 INFO [M:0;51ca66f7ee3c:36227 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T20:48:15,966 DEBUG [M:0;51ca66f7ee3c:36227 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T20:48:15,966 DEBUG [M:0;51ca66f7ee3c:36227 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T20:48:15,966 DEBUG [M:0;51ca66f7ee3c:36227 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-11T20:48:15,966 INFO [M:0;51ca66f7ee3c:36227 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.43 KB heapSize=63.38 KB 2024-11-11T20:48:15,982 DEBUG [M:0;51ca66f7ee3c:36227 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/17369b6b7a4d4d6b8590a41b2bacdae0 is 82, key is hbase:meta,,1/info:regioninfo/1731358048684/Put/seqid=0 2024-11-11T20:48:15,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741882_1058 (size=5672) 2024-11-11T20:48:15,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741882_1058 (size=5672) 2024-11-11T20:48:15,987 INFO [M:0;51ca66f7ee3c:36227 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/17369b6b7a4d4d6b8590a41b2bacdae0 2024-11-11T20:48:16,008 DEBUG [M:0;51ca66f7ee3c:36227 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/12aaf4a6a51c45e397001a0640d8a38a is 750, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731358049119/Put/seqid=0 2024-11-11T20:48:16,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741883_1059 (size=7090) 2024-11-11T20:48:16,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741883_1059 (size=7090) 2024-11-11T20:48:16,013 INFO [M:0;51ca66f7ee3c:36227 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.83 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/12aaf4a6a51c45e397001a0640d8a38a 2024-11-11T20:48:16,017 INFO [M:0;51ca66f7ee3c:36227 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 12aaf4a6a51c45e397001a0640d8a38a 2024-11-11T20:48:16,033 DEBUG [M:0;51ca66f7ee3c:36227 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/57cd6c870992475388253845c184f934 is 69, key is 51ca66f7ee3c,45533,1731358047907/rs:state/1731358048140/Put/seqid=0 2024-11-11T20:48:16,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741884_1060 (size=5156) 2024-11-11T20:48:16,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741884_1060 (size=5156) 2024-11-11T20:48:16,047 INFO [M:0;51ca66f7ee3c:36227 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), 
to=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/57cd6c870992475388253845c184f934 2024-11-11T20:48:16,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45533-0x100308a9ad70001, quorum=127.0.0.1:63469, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T20:48:16,057 INFO [RS:0;51ca66f7ee3c:45533 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T20:48:16,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45533-0x100308a9ad70001, quorum=127.0.0.1:63469, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T20:48:16,057 INFO [RS:0;51ca66f7ee3c:45533 {}] regionserver.HRegionServer(1031): Exiting; stopping=51ca66f7ee3c,45533,1731358047907; zookeeper connection closed. 2024-11-11T20:48:16,057 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@784cad37 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@784cad37 2024-11-11T20:48:16,058 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-11T20:48:16,064 DEBUG [M:0;51ca66f7ee3c:36227 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c2ac46b3fc3941cfa48ca300663f024c is 52, key is load_balancer_on/state:d/1731358048732/Put/seqid=0 2024-11-11T20:48:16,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741885_1061 (size=5056) 2024-11-11T20:48:16,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741885_1061 (size=5056) 2024-11-11T20:48:16,068 INFO [M:0;51ca66f7ee3c:36227 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c2ac46b3fc3941cfa48ca300663f024c 2024-11-11T20:48:16,073 DEBUG [M:0;51ca66f7ee3c:36227 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/17369b6b7a4d4d6b8590a41b2bacdae0 as hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/17369b6b7a4d4d6b8590a41b2bacdae0 2024-11-11T20:48:16,077 INFO [M:0;51ca66f7ee3c:36227 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/17369b6b7a4d4d6b8590a41b2bacdae0, entries=8, sequenceid=125, filesize=5.5 K 2024-11-11T20:48:16,078 DEBUG [M:0;51ca66f7ee3c:36227 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/12aaf4a6a51c45e397001a0640d8a38a as 
hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/12aaf4a6a51c45e397001a0640d8a38a 2024-11-11T20:48:16,082 INFO [M:0;51ca66f7ee3c:36227 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 12aaf4a6a51c45e397001a0640d8a38a 2024-11-11T20:48:16,082 INFO [M:0;51ca66f7ee3c:36227 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/12aaf4a6a51c45e397001a0640d8a38a, entries=13, sequenceid=125, filesize=6.9 K 2024-11-11T20:48:16,083 DEBUG [M:0;51ca66f7ee3c:36227 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/57cd6c870992475388253845c184f934 as hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/57cd6c870992475388253845c184f934 2024-11-11T20:48:16,087 INFO [M:0;51ca66f7ee3c:36227 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/57cd6c870992475388253845c184f934, entries=1, sequenceid=125, filesize=5.0 K 2024-11-11T20:48:16,088 DEBUG [M:0;51ca66f7ee3c:36227 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c2ac46b3fc3941cfa48ca300663f024c as hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c2ac46b3fc3941cfa48ca300663f024c 2024-11-11T20:48:16,091 INFO [M:0;51ca66f7ee3c:36227 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41561/user/jenkins/test-data/548595e0-a276-cd1f-4c9f-cc70c3db98eb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c2ac46b3fc3941cfa48ca300663f024c, entries=1, sequenceid=125, filesize=4.9 K 2024-11-11T20:48:16,092 INFO [M:0;51ca66f7ee3c:36227 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.43 KB/52663, heapSize ~63.32 KB/64840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 126ms, sequenceid=125, compaction requested=false 2024-11-11T20:48:16,093 INFO [M:0;51ca66f7ee3c:36227 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T20:48:16,093 DEBUG [M:0;51ca66f7ee3c:36227 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731358095966Disabling compacts and flushes for region at 1731358095966Disabling writes for close at 1731358095966Obtaining lock to block concurrent updates at 1731358095966Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731358095966Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52663, getHeapSize=64840, getOffHeapSize=0, getCellsCount=148 at 1731358095967 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731358095967Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731358095967Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731358095982 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731358095982Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731358095990 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731358096007 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731358096007Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731358096017 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731358096033 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731358096033Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731358096050 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731358096064 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731358096064Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1a5ee5e1: reopening flushed file at 1731358096072 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6817bcb8: reopening flushed file at 1731358096078 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5a8acb66: reopening flushed file at 1731358096082 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@335ff2fa: reopening flushed file at 1731358096087 (+5 ms)Finished flush of dataSize ~51.43 KB/52663, heapSize ~63.32 KB/64840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 126ms, sequenceid=125, compaction requested=false at 1731358096092 (+5 ms)Writing region close event to WAL at 1731358096093 (+1 ms)Closed at 1731358096093 2024-11-11T20:48:16,094 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:48:16,094 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:48:16,094 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:48:16,094 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:48:16,094 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:48:16,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741830_1006 (size=61332) 2024-11-11T20:48:16,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33809 is added to blk_1073741830_1006 (size=61332) 2024-11-11T20:48:16,096 INFO [M:0;51ca66f7ee3c:36227 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-11T20:48:16,096 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-11T20:48:16,096 INFO [M:0;51ca66f7ee3c:36227 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36227 2024-11-11T20:48:16,096 INFO [M:0;51ca66f7ee3c:36227 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T20:48:16,159 INFO [regionserver/51ca66f7ee3c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T20:48:16,197 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36227-0x100308a9ad70000, quorum=127.0.0.1:63469, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T20:48:16,197 INFO [M:0;51ca66f7ee3c:36227 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T20:48:16,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36227-0x100308a9ad70000, quorum=127.0.0.1:63469, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T20:48:16,200 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5689196f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T20:48:16,201 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@9612b29{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T20:48:16,201 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T20:48:16,201 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7cf57c4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T20:48:16,201 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@44a25975{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cea88a2-fa2b-d370-7d01-ef4842d6e027/hadoop.log.dir/,STOPPED} 2024-11-11T20:48:16,203 WARN [BP-1277814845-172.17.0.2-1731358047280 heartbeating to localhost/127.0.0.1:41561 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T20:48:16,203 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T20:48:16,203 WARN [BP-1277814845-172.17.0.2-1731358047280 heartbeating to localhost/127.0.0.1:41561 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1277814845-172.17.0.2-1731358047280 (Datanode Uuid bedd0fd8-f18f-42cc-980c-68fad6ce366c) service to localhost/127.0.0.1:41561 2024-11-11T20:48:16,203 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T20:48:16,204 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cea88a2-fa2b-d370-7d01-ef4842d6e027/cluster_a3c4b165-486e-3dcf-597e-3a27b29683f1/data/data3/current/BP-1277814845-172.17.0.2-1731358047280 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T20:48:16,204 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cea88a2-fa2b-d370-7d01-ef4842d6e027/cluster_a3c4b165-486e-3dcf-597e-3a27b29683f1/data/data4/current/BP-1277814845-172.17.0.2-1731358047280 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T20:48:16,204 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T20:48:16,206 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2d8c7847{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T20:48:16,206 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@45890504{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T20:48:16,206 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T20:48:16,206 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@408fd242{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T20:48:16,206 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5327e2a9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cea88a2-fa2b-d370-7d01-ef4842d6e027/hadoop.log.dir/,STOPPED} 2024-11-11T20:48:16,208 WARN [BP-1277814845-172.17.0.2-1731358047280 heartbeating to localhost/127.0.0.1:41561 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T20:48:16,208 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T20:48:16,208 WARN [BP-1277814845-172.17.0.2-1731358047280 heartbeating to localhost/127.0.0.1:41561 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1277814845-172.17.0.2-1731358047280 (Datanode Uuid fb8a8c10-9d3e-4052-a3f9-56e949d8d679) service to localhost/127.0.0.1:41561 2024-11-11T20:48:16,208 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T20:48:16,208 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cea88a2-fa2b-d370-7d01-ef4842d6e027/cluster_a3c4b165-486e-3dcf-597e-3a27b29683f1/data/data1/current/BP-1277814845-172.17.0.2-1731358047280 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T20:48:16,209 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cea88a2-fa2b-d370-7d01-ef4842d6e027/cluster_a3c4b165-486e-3dcf-597e-3a27b29683f1/data/data2/current/BP-1277814845-172.17.0.2-1731358047280 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T20:48:16,209 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T20:48:16,214 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@d956b{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T20:48:16,215 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@59c539e7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T20:48:16,215 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T20:48:16,215 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5f3f4eb4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T20:48:16,215 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3aae5916{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cea88a2-fa2b-d370-7d01-ef4842d6e027/hadoop.log.dir/,STOPPED} 2024-11-11T20:48:16,222 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-11T20:48:16,261 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-11T20:48:16,269 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=230 (was 205) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:41561 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41561 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41561 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: 
MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:41561 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41561 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41561 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41561 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41561 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41561 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=512 (was 483) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=252 (was 168) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=3929 (was 4117) 2024-11-11T20:48:16,276 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=230, OpenFileDescriptor=512, MaxFileDescriptor=1048576, SystemLoadAverage=252, ProcessCount=11, AvailableMemoryMB=3929 2024-11-11T20:48:16,276 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-11T20:48:16,276 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cea88a2-fa2b-d370-7d01-ef4842d6e027/hadoop.log.dir so I do NOT create it in target/test-data/cbe39618-7fdf-65db-ec64-a9da2d13b399 2024-11-11T20:48:16,277 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3cea88a2-fa2b-d370-7d01-ef4842d6e027/hadoop.tmp.dir so I do NOT create it in target/test-data/cbe39618-7fdf-65db-ec64-a9da2d13b399 2024-11-11T20:48:16,277 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cbe39618-7fdf-65db-ec64-a9da2d13b399/cluster_6b0c2be6-ac1f-6e04-d925-cfa0089605e2, deleteOnExit=true 2024-11-11T20:48:16,277 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-11T20:48:16,277 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cbe39618-7fdf-65db-ec64-a9da2d13b399/test.cache.data in system properties and HBase conf 2024-11-11T20:48:16,277 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cbe39618-7fdf-65db-ec64-a9da2d13b399/hadoop.tmp.dir in system properties and HBase conf 2024-11-11T20:48:16,277 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cbe39618-7fdf-65db-ec64-a9da2d13b399/hadoop.log.dir in system properties and HBase conf 2024-11-11T20:48:16,277 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cbe39618-7fdf-65db-ec64-a9da2d13b399/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-11T20:48:16,277 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cbe39618-7fdf-65db-ec64-a9da2d13b399/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-11T20:48:16,277 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-11T20:48:16,278 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-11T20:48:16,278 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cbe39618-7fdf-65db-ec64-a9da2d13b399/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-11T20:48:16,278 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cbe39618-7fdf-65db-ec64-a9da2d13b399/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-11T20:48:16,278 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cbe39618-7fdf-65db-ec64-a9da2d13b399/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-11T20:48:16,278 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cbe39618-7fdf-65db-ec64-a9da2d13b399/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T20:48:16,278 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cbe39618-7fdf-65db-ec64-a9da2d13b399/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-11T20:48:16,278 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cbe39618-7fdf-65db-ec64-a9da2d13b399/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-11T20:48:16,278 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cbe39618-7fdf-65db-ec64-a9da2d13b399/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T20:48:16,278 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cbe39618-7fdf-65db-ec64-a9da2d13b399/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T20:48:16,278 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cbe39618-7fdf-65db-ec64-a9da2d13b399/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-11T20:48:16,279 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cbe39618-7fdf-65db-ec64-a9da2d13b399/nfs.dump.dir in system properties and HBase conf 2024-11-11T20:48:16,279 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cbe39618-7fdf-65db-ec64-a9da2d13b399/java.io.tmpdir in system properties and HBase conf 2024-11-11T20:48:16,279 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cbe39618-7fdf-65db-ec64-a9da2d13b399/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T20:48:16,279 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cbe39618-7fdf-65db-ec64-a9da2d13b399/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-11T20:48:16,279 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cbe39618-7fdf-65db-ec64-a9da2d13b399/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-11T20:48:16,292 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-11T20:48:16,334 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T20:48:16,337 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T20:48:16,338 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T20:48:16,338 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T20:48:16,338 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T20:48:16,338 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T20:48:16,341 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@32d01bcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cbe39618-7fdf-65db-ec64-a9da2d13b399/hadoop.log.dir/,AVAILABLE} 2024-11-11T20:48:16,341 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@512e80eb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T20:48:16,443 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@54644e01{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cbe39618-7fdf-65db-ec64-a9da2d13b399/java.io.tmpdir/jetty-localhost-37537-hadoop-hdfs-3_4_1-tests_jar-_-any-15027649067206827311/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T20:48:16,443 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@134d2ab8{HTTP/1.1, (http/1.1)}{localhost:37537} 2024-11-11T20:48:16,443 INFO [Time-limited test {}] server.Server(415): Started @285668ms 2024-11-11T20:48:16,458 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-11T20:48:16,492 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T20:48:16,495 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T20:48:16,496 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T20:48:16,496 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T20:48:16,496 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T20:48:16,497 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6c8a9439{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cbe39618-7fdf-65db-ec64-a9da2d13b399/hadoop.log.dir/,AVAILABLE} 2024-11-11T20:48:16,497 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@e32ebb8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T20:48:16,599 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@79ca80d8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cbe39618-7fdf-65db-ec64-a9da2d13b399/java.io.tmpdir/jetty-localhost-37761-hadoop-hdfs-3_4_1-tests_jar-_-any-5417269006211656093/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T20:48:16,599 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@150dab73{HTTP/1.1, (http/1.1)}{localhost:37761} 2024-11-11T20:48:16,599 INFO [Time-limited test {}] server.Server(415): Started @285823ms 2024-11-11T20:48:16,600 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T20:48:16,625 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T20:48:16,627 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T20:48:16,628 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T20:48:16,628 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T20:48:16,628 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T20:48:16,629 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@814e400{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cbe39618-7fdf-65db-ec64-a9da2d13b399/hadoop.log.dir/,AVAILABLE} 2024-11-11T20:48:16,629 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7748f5df{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T20:48:16,659 WARN [Thread-2483 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cbe39618-7fdf-65db-ec64-a9da2d13b399/cluster_6b0c2be6-ac1f-6e04-d925-cfa0089605e2/data/data1/current/BP-1891497342-172.17.0.2-1731358096295/current, will proceed with Du for space computation calculation, 2024-11-11T20:48:16,660 WARN [Thread-2484 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cbe39618-7fdf-65db-ec64-a9da2d13b399/cluster_6b0c2be6-ac1f-6e04-d925-cfa0089605e2/data/data2/current/BP-1891497342-172.17.0.2-1731358096295/current, will proceed with Du for space computation calculation, 2024-11-11T20:48:16,683 WARN [Thread-2462 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T20:48:16,685 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe4924dbff61c7dde with lease ID 0x8cb7c883fea104ee: Processing first storage report for DS-6a2e34d0-87de-4472-8361-5d71811d4b3b from datanode DatanodeRegistration(127.0.0.1:36347, datanodeUuid=afd15729-9f3f-44bb-ac87-43060a8bfa70, infoPort=41971, infoSecurePort=0, ipcPort=45599, storageInfo=lv=-57;cid=testClusterID;nsid=1256470878;c=1731358096295) 2024-11-11T20:48:16,685 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe4924dbff61c7dde with lease ID 0x8cb7c883fea104ee: from storage DS-6a2e34d0-87de-4472-8361-5d71811d4b3b node DatanodeRegistration(127.0.0.1:36347, datanodeUuid=afd15729-9f3f-44bb-ac87-43060a8bfa70, infoPort=41971, infoSecurePort=0, ipcPort=45599, storageInfo=lv=-57;cid=testClusterID;nsid=1256470878;c=1731358096295), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T20:48:16,686 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe4924dbff61c7dde with lease ID 0x8cb7c883fea104ee: Processing first storage report for DS-92297a91-47d5-4a29-9ce7-6e990bd5ecde from datanode DatanodeRegistration(127.0.0.1:36347, datanodeUuid=afd15729-9f3f-44bb-ac87-43060a8bfa70, infoPort=41971, infoSecurePort=0, ipcPort=45599, storageInfo=lv=-57;cid=testClusterID;nsid=1256470878;c=1731358096295) 2024-11-11T20:48:16,686 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe4924dbff61c7dde with lease ID 0x8cb7c883fea104ee: from storage DS-92297a91-47d5-4a29-9ce7-6e990bd5ecde node DatanodeRegistration(127.0.0.1:36347, datanodeUuid=afd15729-9f3f-44bb-ac87-43060a8bfa70, infoPort=41971, infoSecurePort=0, ipcPort=45599, storageInfo=lv=-57;cid=testClusterID;nsid=1256470878;c=1731358096295), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T20:48:16,740 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@87b2e2b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cbe39618-7fdf-65db-ec64-a9da2d13b399/java.io.tmpdir/jetty-localhost-40869-hadoop-hdfs-3_4_1-tests_jar-_-any-4960027373409867502/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T20:48:16,740 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@380ffe40{HTTP/1.1, (http/1.1)}{localhost:40869} 2024-11-11T20:48:16,740 INFO [Time-limited test {}] server.Server(415): Started @285965ms 2024-11-11T20:48:16,741 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-11T20:48:16,800 WARN [Thread-2509 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cbe39618-7fdf-65db-ec64-a9da2d13b399/cluster_6b0c2be6-ac1f-6e04-d925-cfa0089605e2/data/data3/current/BP-1891497342-172.17.0.2-1731358096295/current, will proceed with Du for space computation calculation, 2024-11-11T20:48:16,800 WARN [Thread-2510 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cbe39618-7fdf-65db-ec64-a9da2d13b399/cluster_6b0c2be6-ac1f-6e04-d925-cfa0089605e2/data/data4/current/BP-1891497342-172.17.0.2-1731358096295/current, will proceed with Du for space computation calculation, 2024-11-11T20:48:16,822 WARN [Thread-2498 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-11T20:48:16,823 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x721dee5da5d143e6 with lease ID 0x8cb7c883fea104ef: Processing first storage report for DS-632f01a2-35ab-43bd-a7db-34bc1ab8afea from datanode DatanodeRegistration(127.0.0.1:34875, datanodeUuid=c592c507-930f-44ba-a698-c0b40ead0a23, infoPort=38871, infoSecurePort=0, ipcPort=35551, storageInfo=lv=-57;cid=testClusterID;nsid=1256470878;c=1731358096295) 2024-11-11T20:48:16,823 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x721dee5da5d143e6 with lease ID 0x8cb7c883fea104ef: from storage DS-632f01a2-35ab-43bd-a7db-34bc1ab8afea node DatanodeRegistration(127.0.0.1:34875, datanodeUuid=c592c507-930f-44ba-a698-c0b40ead0a23, infoPort=38871, infoSecurePort=0, ipcPort=35551, storageInfo=lv=-57;cid=testClusterID;nsid=1256470878;c=1731358096295), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T20:48:16,823 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x721dee5da5d143e6 with lease ID 0x8cb7c883fea104ef: Processing first storage report for DS-92a92cfe-d951-437e-b4ab-86458028dd24 from datanode DatanodeRegistration(127.0.0.1:34875, datanodeUuid=c592c507-930f-44ba-a698-c0b40ead0a23, infoPort=38871, infoSecurePort=0, ipcPort=35551, storageInfo=lv=-57;cid=testClusterID;nsid=1256470878;c=1731358096295) 2024-11-11T20:48:16,823 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x721dee5da5d143e6 with lease ID 0x8cb7c883fea104ef: from storage DS-92a92cfe-d951-437e-b4ab-86458028dd24 node DatanodeRegistration(127.0.0.1:34875, datanodeUuid=c592c507-930f-44ba-a698-c0b40ead0a23, infoPort=38871, infoSecurePort=0, ipcPort=35551, storageInfo=lv=-57;cid=testClusterID;nsid=1256470878;c=1731358096295), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T20:48:16,830 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T20:48:16,830 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-11T20:48:16,831 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: 
RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-11T20:48:16,831 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-11T20:48:16,862 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cbe39618-7fdf-65db-ec64-a9da2d13b399 2024-11-11T20:48:16,864 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cbe39618-7fdf-65db-ec64-a9da2d13b399/cluster_6b0c2be6-ac1f-6e04-d925-cfa0089605e2/zookeeper_0, clientPort=54577, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cbe39618-7fdf-65db-ec64-a9da2d13b399/cluster_6b0c2be6-ac1f-6e04-d925-cfa0089605e2/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cbe39618-7fdf-65db-ec64-a9da2d13b399/cluster_6b0c2be6-ac1f-6e04-d925-cfa0089605e2/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-11T20:48:16,865 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=54577 2024-11-11T20:48:16,865 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:48:16,867 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:48:16,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36347 is added to blk_1073741825_1001 (size=7) 2024-11-11T20:48:16,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34875 is added to blk_1073741825_1001 (size=7) 2024-11-11T20:48:16,877 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c with version=8 2024-11-11T20:48:16,877 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:43051/user/jenkins/test-data/1e9aba8d-bb8b-1719-610b-d88fd215f6a1/hbase-staging 2024-11-11T20:48:16,880 INFO [Time-limited test {}] client.ConnectionUtils(128): master/51ca66f7ee3c:0 server-side Connection retries=45 2024-11-11T20:48:16,880 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T20:48:16,880 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T20:48:16,880 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 
2024-11-11T20:48:16,880 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T20:48:16,880 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T20:48:16,880 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-11T20:48:16,880 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T20:48:16,881 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40227 2024-11-11T20:48:16,882 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40227 connecting to ZooKeeper ensemble=127.0.0.1:54577 2024-11-11T20:48:16,887 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:402270x0, quorum=127.0.0.1:54577, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T20:48:16,887 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40227-0x100308b5a4e0000 connected 2024-11-11T20:48:16,903 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:48:16,904 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:48:16,907 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40227-0x100308b5a4e0000, quorum=127.0.0.1:54577, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T20:48:16,907 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c, hbase.cluster.distributed=false 2024-11-11T20:48:16,909 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40227-0x100308b5a4e0000, quorum=127.0.0.1:54577, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T20:48:16,917 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40227 2024-11-11T20:48:16,917 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40227 2024-11-11T20:48:16,918 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40227 2024-11-11T20:48:16,918 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40227 2024-11-11T20:48:16,918 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40227 2024-11-11T20:48:16,931 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/51ca66f7ee3c:0 server-side 
Connection retries=45 2024-11-11T20:48:16,931 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T20:48:16,931 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T20:48:16,931 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T20:48:16,931 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T20:48:16,931 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T20:48:16,931 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-11T20:48:16,932 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T20:48:16,932 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38397 2024-11-11T20:48:16,933 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38397 connecting to ZooKeeper ensemble=127.0.0.1:54577 2024-11-11T20:48:16,934 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:48:16,935 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:48:16,938 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:383970x0, quorum=127.0.0.1:54577, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T20:48:16,938 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38397-0x100308b5a4e0001, quorum=127.0.0.1:54577, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T20:48:16,938 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38397-0x100308b5a4e0001 connected 2024-11-11T20:48:16,939 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-11T20:48:16,941 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-11T20:48:16,941 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38397-0x100308b5a4e0001, quorum=127.0.0.1:54577, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-11T20:48:16,942 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38397-0x100308b5a4e0001, quorum=127.0.0.1:54577, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T20:48:16,943 
DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38397 2024-11-11T20:48:16,943 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38397 2024-11-11T20:48:16,945 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38397 2024-11-11T20:48:16,945 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38397 2024-11-11T20:48:16,945 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38397 2024-11-11T20:48:16,956 DEBUG [M:0;51ca66f7ee3c:40227 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;51ca66f7ee3c:40227 2024-11-11T20:48:16,956 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/51ca66f7ee3c,40227,1731358096879 2024-11-11T20:48:16,957 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38397-0x100308b5a4e0001, quorum=127.0.0.1:54577, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T20:48:16,957 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40227-0x100308b5a4e0000, quorum=127.0.0.1:54577, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T20:48:16,958 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40227-0x100308b5a4e0000, quorum=127.0.0.1:54577, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/51ca66f7ee3c,40227,1731358096879 2024-11-11T20:48:16,959 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38397-0x100308b5a4e0001, quorum=127.0.0.1:54577, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-11T20:48:16,959 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40227-0x100308b5a4e0000, quorum=127.0.0.1:54577, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:48:16,959 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38397-0x100308b5a4e0001, quorum=127.0.0.1:54577, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:48:16,959 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40227-0x100308b5a4e0000, quorum=127.0.0.1:54577, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-11T20:48:16,959 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/51ca66f7ee3c,40227,1731358096879 from backup master directory 2024-11-11T20:48:16,960 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40227-0x100308b5a4e0000, quorum=127.0.0.1:54577, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/51ca66f7ee3c,40227,1731358096879 2024-11-11T20:48:16,960 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38397-0x100308b5a4e0001, 
quorum=127.0.0.1:54577, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T20:48:16,960 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40227-0x100308b5a4e0000, quorum=127.0.0.1:54577, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T20:48:16,960 WARN [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-11T20:48:16,960 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=51ca66f7ee3c,40227,1731358096879 2024-11-11T20:48:16,964 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:48:16,964 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:48:16,965 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/hbase.id] with ID: a6a7b8ae-674b-4568-a8bc-7c60a0cc6272 2024-11-11T20:48:16,965 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/.tmp/hbase.id 2024-11-11T20:48:16,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36347 is added to blk_1073741826_1002 (size=42) 2024-11-11T20:48:16,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34875 is added to blk_1073741826_1002 (size=42) 2024-11-11T20:48:16,974 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/.tmp/hbase.id]:[hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/hbase.id] 2024-11-11T20:48:16,984 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:48:16,984 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 
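The two "Failed invocation ... Filesystem closed" warnings above come from WAL lease recovery attempted against a DFS client that has already been shut down; note the older server startcodes (1731357916578, 1731357917854) and the different test-data directory, so those WALs belong to an earlier mini-cluster, not the one being started here. As a hedged sketch of the underlying HDFS calls (this is not the reflective RecoverLeaseFSUtils code, and the class and method names below are invented for illustration):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoverySketch {
      // Ask the NameNode to recover the lease on a WAL file, then poll
      // isFileClosed() until the file is sealed. In the log above the poll is
      // what throws, because DFSClient.checkOpen() finds the client closed.
      static void recoverWal(Configuration conf, Path wal) throws Exception {
        FileSystem fs = wal.getFileSystem(conf);
        if (!(fs instanceof DistributedFileSystem)) {
          return; // lease recovery only applies to HDFS
        }
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        boolean recovered = dfs.recoverLease(wal);
        while (!recovered && !dfs.isFileClosed(wal)) {
          Thread.sleep(1000);
          recovered = dfs.recoverLease(wal);
        }
      }
    }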
2024-11-11T20:48:16,985 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 2024-11-11T20:48:16,987 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38397-0x100308b5a4e0001, quorum=127.0.0.1:54577, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:48:16,987 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40227-0x100308b5a4e0000, quorum=127.0.0.1:54577, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:48:16,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36347 is added to blk_1073741827_1003 (size=196) 2024-11-11T20:48:16,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34875 is added to blk_1073741827_1003 (size=196) 2024-11-11T20:48:16,997 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T20:48:16,998 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-11T20:48:16,998 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T20:48:17,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36347 is added to blk_1073741828_1004 (size=1189) 2024-11-11T20:48:17,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34875 is added to blk_1073741828_1004 (size=1189) 2024-11-11T20:48:17,008 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => 
{'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/MasterData/data/master/store 2024-11-11T20:48:17,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36347 is added to blk_1073741829_1005 (size=34) 2024-11-11T20:48:17,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34875 is added to blk_1073741829_1005 (size=34) 2024-11-11T20:48:17,014 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T20:48:17,014 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T20:48:17,014 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T20:48:17,014 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T20:48:17,014 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T20:48:17,014 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T20:48:17,014 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
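The master:store descriptor printed above is built internally during master region bootstrap. Purely as an illustrative sketch of the same column-family settings using the public client builders (the class name is hypothetical and only the 'info' family is shown; proc, rs and state differ only in VERSIONS => '1', NONE encoding, ROW bloom filters and 64 KB blocks):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
      // Mirrors the 'info' family above: 3 versions, in-memory, ROW_INDEX_V1
      // block encoding, ROWCOL bloom filter, 8 KB blocks.
      static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setBlocksize(8 * 1024)
                .build())
            .build();
      }
    }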
2024-11-11T20:48:17,014 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731358097014Disabling compacts and flushes for region at 1731358097014Disabling writes for close at 1731358097014Writing region close event to WAL at 1731358097014Closed at 1731358097014 2024-11-11T20:48:17,015 WARN [master/51ca66f7ee3c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/MasterData/data/master/store/.initializing 2024-11-11T20:48:17,015 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/MasterData/WALs/51ca66f7ee3c,40227,1731358096879 2024-11-11T20:48:17,017 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=51ca66f7ee3c%2C40227%2C1731358096879, suffix=, logDir=hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/MasterData/WALs/51ca66f7ee3c,40227,1731358096879, archiveDir=hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/MasterData/oldWALs, maxLogs=10 2024-11-11T20:48:17,018 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 51ca66f7ee3c%2C40227%2C1731358096879.1731358097018 2024-11-11T20:48:17,022 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/MasterData/WALs/51ca66f7ee3c,40227,1731358096879/51ca66f7ee3c%2C40227%2C1731358096879.1731358097018 2024-11-11T20:48:17,026 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41971:41971),(127.0.0.1/127.0.0.1:38871:38871)] 2024-11-11T20:48:17,029 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-11T20:48:17,029 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T20:48:17,029 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:48:17,029 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:48:17,031 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:48:17,032 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-11T20:48:17,032 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:48:17,033 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:48:17,033 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:48:17,034 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-11T20:48:17,034 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:48:17,035 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T20:48:17,035 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:48:17,036 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-11T20:48:17,036 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:48:17,037 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T20:48:17,037 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:48:17,038 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-11T20:48:17,038 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:48:17,038 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T20:48:17,039 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:48:17,039 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:48:17,040 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:48:17,041 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:48:17,041 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:48:17,041 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-11T20:48:17,042 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T20:48:17,044 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T20:48:17,044 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=707224, jitterRate=-0.10071876645088196}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-11T20:48:17,045 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731358097029Initializing all the Stores at 1731358097030 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731358097030Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731358097030Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731358097030Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731358097030Cleaning up temporary data from old regions at 1731358097041 (+11 ms)Region opened successfully at 1731358097044 (+3 ms) 2024-11-11T20:48:17,045 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-11T20:48:17,048 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58625b97, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=51ca66f7ee3c/172.17.0.2:0 2024-11-11T20:48:17,049 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-11T20:48:17,049 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-11T20:48:17,049 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-11T20:48:17,049 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-11T20:48:17,049 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-11T20:48:17,050 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-11T20:48:17,050 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-11T20:48:17,052 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-11T20:48:17,053 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40227-0x100308b5a4e0000, quorum=127.0.0.1:54577, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-11T20:48:17,054 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-11T20:48:17,054 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-11T20:48:17,055 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40227-0x100308b5a4e0000, quorum=127.0.0.1:54577, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-11T20:48:17,055 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-11T20:48:17,056 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-11T20:48:17,056 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40227-0x100308b5a4e0000, quorum=127.0.0.1:54577, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-11T20:48:17,057 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-11T20:48:17,058 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40227-0x100308b5a4e0000, quorum=127.0.0.1:54577, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-11T20:48:17,059 DEBUG 
[master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-11T20:48:17,061 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40227-0x100308b5a4e0000, quorum=127.0.0.1:54577, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-11T20:48:17,062 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-11T20:48:17,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40227-0x100308b5a4e0000, quorum=127.0.0.1:54577, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T20:48:17,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38397-0x100308b5a4e0001, quorum=127.0.0.1:54577, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T20:48:17,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38397-0x100308b5a4e0001, quorum=127.0.0.1:54577, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:48:17,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40227-0x100308b5a4e0000, quorum=127.0.0.1:54577, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:48:17,063 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=51ca66f7ee3c,40227,1731358096879, sessionid=0x100308b5a4e0000, setting cluster-up flag (Was=false) 2024-11-11T20:48:17,065 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38397-0x100308b5a4e0001, quorum=127.0.0.1:54577, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:48:17,065 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40227-0x100308b5a4e0000, quorum=127.0.0.1:54577, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:48:17,067 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-11T20:48:17,069 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=51ca66f7ee3c,40227,1731358096879 2024-11-11T20:48:17,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38397-0x100308b5a4e0001, quorum=127.0.0.1:54577, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:48:17,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40227-0x100308b5a4e0000, quorum=127.0.0.1:54577, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:48:17,075 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-11T20:48:17,076 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=51ca66f7ee3c,40227,1731358096879 2024-11-11T20:48:17,077 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-11T20:48:17,082 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-11T20:48:17,083 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-11T20:48:17,083 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-11T20:48:17,083 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 51ca66f7ee3c,40227,1731358096879 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-11T20:48:17,084 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/51ca66f7ee3c:0, corePoolSize=5, maxPoolSize=5 2024-11-11T20:48:17,084 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/51ca66f7ee3c:0, corePoolSize=5, maxPoolSize=5 2024-11-11T20:48:17,084 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/51ca66f7ee3c:0, corePoolSize=5, maxPoolSize=5 2024-11-11T20:48:17,084 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/51ca66f7ee3c:0, corePoolSize=5, maxPoolSize=5 2024-11-11T20:48:17,084 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/51ca66f7ee3c:0, corePoolSize=10, maxPoolSize=10 2024-11-11T20:48:17,084 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:48:17,084 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/51ca66f7ee3c:0, corePoolSize=2, maxPoolSize=2 2024-11-11T20:48:17,084 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/51ca66f7ee3c:0, corePoolSize=1, 
maxPoolSize=1 2024-11-11T20:48:17,091 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T20:48:17,091 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-11T20:48:17,092 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:48:17,092 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-11T20:48:17,097 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731358127097 2024-11-11T20:48:17,097 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-11T20:48:17,097 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-11T20:48:17,097 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-11T20:48:17,097 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-11T20:48:17,098 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-11T20:48:17,098 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-11T20:48:17,098 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T20:48:17,098 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-11T20:48:17,098 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-11T20:48:17,098 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-11T20:48:17,099 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-11T20:48:17,099 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-11T20:48:17,101 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/51ca66f7ee3c:0:becomeActiveMaster-HFileCleaner.large.0-1731358097099,5,FailOnTimeoutGroup] 2024-11-11T20:48:17,103 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/51ca66f7ee3c:0:becomeActiveMaster-HFileCleaner.small.0-1731358097101,5,FailOnTimeoutGroup] 2024-11-11T20:48:17,103 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T20:48:17,103 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-11T20:48:17,103 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-11T20:48:17,103 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
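Several of the settings logged above are driven by plain Configuration keys. A hedged example: hbase.regions.recovery.store.file.ref.count is quoted verbatim in the log (a threshold > 0 enables reopening regions with very high store file ref counts), while hbase.master.cleaner.interval is an assumption for the 600000 ms cleaner chore period and should be verified against this HBase version:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MasterChoreConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Named verbatim in the log; > 0 turns the recovery behavior on.
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 256);
        // Assumed key for the LogsCleaner/HFileCleaner period seen above.
        conf.setLong("hbase.master.cleaner.interval", 600_000L);
        System.out.println(conf.get("hbase.regions.recovery.store.file.ref.count"));
      }
    }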
2024-11-11T20:48:17,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34875 is added to blk_1073741831_1007 (size=1321) 2024-11-11T20:48:17,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36347 is added to blk_1073741831_1007 (size=1321) 2024-11-11T20:48:17,106 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-11T20:48:17,106 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c 2024-11-11T20:48:17,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36347 is added to blk_1073741832_1008 (size=32) 2024-11-11T20:48:17,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34875 is added to blk_1073741832_1008 (size=32) 2024-11-11T20:48:17,118 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T20:48:17,119 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T20:48:17,120 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T20:48:17,120 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:48:17,120 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:48:17,120 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T20:48:17,121 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T20:48:17,121 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:48:17,121 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:48:17,122 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T20:48:17,122 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T20:48:17,123 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:48:17,123 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:48:17,123 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T20:48:17,124 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T20:48:17,124 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:48:17,124 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:48:17,124 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T20:48:17,125 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/data/hbase/meta/1588230740 2024-11-11T20:48:17,125 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/data/hbase/meta/1588230740 2024-11-11T20:48:17,127 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T20:48:17,127 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T20:48:17,127 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
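The "(16.0 M)" fallback reported just above for hbase:meta, like the "(32.0 M)" reported earlier for master:store, is simply the region's memstore flush size divided by its number of column families, used when a table does not set hbase.hregion.percolumnfamilyflush.size.lower.bound. A small sketch of that arithmetic (the helper name is mine):

    public class FlushLowerBoundSketch {
      // FlushLargeStoresPolicy fallback: split the region's memstore flush size
      // evenly across its column families.
      static long perFamilyLowerBound(long memstoreFlushHeapSize, int familyCount) {
        return memstoreFlushHeapSize / familyCount;
      }

      public static void main(String[] args) {
        // master:store above: flushSize=134217728 (128 MB) across 4 families
        // (info, proc, rs, state) gives the logged flushSizeLowerBound=33554432.
        System.out.println(perFamilyLowerBound(134_217_728L, 4)); // 33554432
      }
    }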
2024-11-11T20:48:17,128 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T20:48:17,130 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T20:48:17,130 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=767690, jitterRate=-0.023832812905311584}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T20:48:17,131 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731358097118Initializing all the Stores at 1731358097119 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731358097119Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731358097119Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731358097119Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731358097119Cleaning up temporary data from old regions at 1731358097127 (+8 ms)Region opened successfully at 1731358097131 (+4 ms) 2024-11-11T20:48:17,131 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T20:48:17,131 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T20:48:17,131 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T20:48:17,131 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T20:48:17,131 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T20:48:17,131 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T20:48:17,131 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731358097131Disabling compacts and flushes for region at 1731358097131Disabling writes for close at 1731358097131Writing region 
close event to WAL at 1731358097131Closed at 1731358097131 2024-11-11T20:48:17,132 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T20:48:17,132 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-11T20:48:17,132 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-11T20:48:17,134 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T20:48:17,134 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-11T20:48:17,148 INFO [RS:0;51ca66f7ee3c:38397 {}] regionserver.HRegionServer(746): ClusterId : a6a7b8ae-674b-4568-a8bc-7c60a0cc6272 2024-11-11T20:48:17,148 DEBUG [RS:0;51ca66f7ee3c:38397 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-11T20:48:17,150 DEBUG [RS:0;51ca66f7ee3c:38397 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-11T20:48:17,150 DEBUG [RS:0;51ca66f7ee3c:38397 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-11T20:48:17,151 DEBUG [RS:0;51ca66f7ee3c:38397 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-11T20:48:17,152 DEBUG [RS:0;51ca66f7ee3c:38397 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b7e03b8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=51ca66f7ee3c/172.17.0.2:0 2024-11-11T20:48:17,163 DEBUG [RS:0;51ca66f7ee3c:38397 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;51ca66f7ee3c:38397 2024-11-11T20:48:17,163 INFO [RS:0;51ca66f7ee3c:38397 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-11T20:48:17,163 INFO [RS:0;51ca66f7ee3c:38397 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-11T20:48:17,163 DEBUG [RS:0;51ca66f7ee3c:38397 {}] regionserver.HRegionServer(832): About to register with Master. 
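
The ipc.AbstractRpcClient(198) entry above reports the region server's RPC client settings: KeyValueCodec, no compressor, tcpKeepAlive/tcpNoDelay on, connectTO=10000, readTO=20000, writeTO=60000. A minimal sketch of the knobs usually behind those timeouts follows; the hbase.ipc.client.* key names are recalled from memory rather than taken from this log and may differ between releases.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RpcClientTimeoutSketch {
    public static Configuration rpcTimeouts() {
        Configuration conf = HBaseConfiguration.create();
        // Assumed key names; the values mirror connectTO/readTO/writeTO printed by AbstractRpcClient(198).
        conf.setInt("hbase.ipc.client.connect.timeout", 10000);       // connectTO
        conf.setInt("hbase.ipc.client.socket.timeout.read", 20000);   // readTO
        conf.setInt("hbase.ipc.client.socket.timeout.write", 60000);  // writeTO
        conf.setBoolean("hbase.ipc.client.tcpnodelay", true);         // tcpNoDelay
        return conf;
    }
}
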
2024-11-11T20:48:17,164 INFO [RS:0;51ca66f7ee3c:38397 {}] regionserver.HRegionServer(2659): reportForDuty to master=51ca66f7ee3c,40227,1731358096879 with port=38397, startcode=1731358096931 2024-11-11T20:48:17,164 DEBUG [RS:0;51ca66f7ee3c:38397 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-11T20:48:17,166 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39159, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-11T20:48:17,166 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40227 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 51ca66f7ee3c,38397,1731358096931 2024-11-11T20:48:17,166 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40227 {}] master.ServerManager(517): Registering regionserver=51ca66f7ee3c,38397,1731358096931 2024-11-11T20:48:17,167 DEBUG [RS:0;51ca66f7ee3c:38397 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c 2024-11-11T20:48:17,167 DEBUG [RS:0;51ca66f7ee3c:38397 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37791 2024-11-11T20:48:17,167 DEBUG [RS:0;51ca66f7ee3c:38397 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-11T20:48:17,169 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40227-0x100308b5a4e0000, quorum=127.0.0.1:54577, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T20:48:17,169 DEBUG [RS:0;51ca66f7ee3c:38397 {}] zookeeper.ZKUtil(111): regionserver:38397-0x100308b5a4e0001, quorum=127.0.0.1:54577, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/51ca66f7ee3c,38397,1731358096931 2024-11-11T20:48:17,169 WARN [RS:0;51ca66f7ee3c:38397 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-11T20:48:17,169 INFO [RS:0;51ca66f7ee3c:38397 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T20:48:17,169 DEBUG [RS:0;51ca66f7ee3c:38397 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/WALs/51ca66f7ee3c,38397,1731358096931 2024-11-11T20:48:17,169 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [51ca66f7ee3c,38397,1731358096931] 2024-11-11T20:48:17,173 INFO [RS:0;51ca66f7ee3c:38397 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-11T20:48:17,174 INFO [RS:0;51ca66f7ee3c:38397 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-11T20:48:17,175 INFO [RS:0;51ca66f7ee3c:38397 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T20:48:17,175 INFO [RS:0;51ca66f7ee3c:38397 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
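
The MemStoreFlusher(131) and PressureAwareCompactionThroughputController(131) entries above show the memstore heap budget (880 M global, 836 M low-water mark, i.e. 95% of the global limit) and the 50-100 MB/s compaction throughput band retuned every 60 s. A minimal sketch, assuming the standard property names, of where those numbers usually come from:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreAndThroughputSketch {
    public static Configuration limits() {
        Configuration conf = HBaseConfiguration.create();
        // Global memstore budget as a fraction of the region server heap; the 880 M / 836 M
        // figures in the log are derived from these fractions and the test JVM's heap size.
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
        // Throughput band used by PressureAwareCompactionThroughputController.
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024); // 100 MB/s
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);   // 50 MB/s
        conf.setInt("hbase.hstore.compaction.throughput.tune.period", 60000);                // 60 s
        return conf;
    }
}
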
2024-11-11T20:48:17,175 INFO [RS:0;51ca66f7ee3c:38397 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-11T20:48:17,176 INFO [RS:0;51ca66f7ee3c:38397 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-11T20:48:17,176 INFO [RS:0;51ca66f7ee3c:38397 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-11T20:48:17,176 DEBUG [RS:0;51ca66f7ee3c:38397 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:48:17,176 DEBUG [RS:0;51ca66f7ee3c:38397 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:48:17,176 DEBUG [RS:0;51ca66f7ee3c:38397 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:48:17,176 DEBUG [RS:0;51ca66f7ee3c:38397 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:48:17,176 DEBUG [RS:0;51ca66f7ee3c:38397 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:48:17,176 DEBUG [RS:0;51ca66f7ee3c:38397 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/51ca66f7ee3c:0, corePoolSize=2, maxPoolSize=2 2024-11-11T20:48:17,176 DEBUG [RS:0;51ca66f7ee3c:38397 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:48:17,176 DEBUG [RS:0;51ca66f7ee3c:38397 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:48:17,176 DEBUG [RS:0;51ca66f7ee3c:38397 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:48:17,176 DEBUG [RS:0;51ca66f7ee3c:38397 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:48:17,177 DEBUG [RS:0;51ca66f7ee3c:38397 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:48:17,177 DEBUG [RS:0;51ca66f7ee3c:38397 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/51ca66f7ee3c:0, corePoolSize=1, maxPoolSize=1 2024-11-11T20:48:17,177 DEBUG [RS:0;51ca66f7ee3c:38397 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/51ca66f7ee3c:0, corePoolSize=3, maxPoolSize=3 2024-11-11T20:48:17,177 DEBUG [RS:0;51ca66f7ee3c:38397 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/51ca66f7ee3c:0, corePoolSize=3, maxPoolSize=3 2024-11-11T20:48:17,181 INFO [RS:0;51ca66f7ee3c:38397 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-11T20:48:17,181 INFO [RS:0;51ca66f7ee3c:38397 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T20:48:17,181 INFO [RS:0;51ca66f7ee3c:38397 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T20:48:17,181 INFO [RS:0;51ca66f7ee3c:38397 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-11T20:48:17,181 INFO [RS:0;51ca66f7ee3c:38397 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T20:48:17,181 INFO [RS:0;51ca66f7ee3c:38397 {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,38397,1731358096931-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T20:48:17,199 INFO [RS:0;51ca66f7ee3c:38397 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T20:48:17,200 INFO [RS:0;51ca66f7ee3c:38397 {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,38397,1731358096931-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T20:48:17,200 INFO [RS:0;51ca66f7ee3c:38397 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T20:48:17,200 INFO [RS:0;51ca66f7ee3c:38397 {}] regionserver.Replication(171): 51ca66f7ee3c,38397,1731358096931 started 2024-11-11T20:48:17,213 INFO [RS:0;51ca66f7ee3c:38397 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T20:48:17,213 INFO [RS:0;51ca66f7ee3c:38397 {}] regionserver.HRegionServer(1482): Serving as 51ca66f7ee3c,38397,1731358096931, RpcServer on 51ca66f7ee3c/172.17.0.2:38397, sessionid=0x100308b5a4e0001 2024-11-11T20:48:17,213 DEBUG [RS:0;51ca66f7ee3c:38397 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T20:48:17,213 DEBUG [RS:0;51ca66f7ee3c:38397 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 51ca66f7ee3c,38397,1731358096931 2024-11-11T20:48:17,213 DEBUG [RS:0;51ca66f7ee3c:38397 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '51ca66f7ee3c,38397,1731358096931' 2024-11-11T20:48:17,213 DEBUG [RS:0;51ca66f7ee3c:38397 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T20:48:17,214 DEBUG [RS:0;51ca66f7ee3c:38397 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T20:48:17,214 DEBUG [RS:0;51ca66f7ee3c:38397 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T20:48:17,214 DEBUG [RS:0;51ca66f7ee3c:38397 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T20:48:17,214 DEBUG [RS:0;51ca66f7ee3c:38397 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 51ca66f7ee3c,38397,1731358096931 2024-11-11T20:48:17,214 DEBUG [RS:0;51ca66f7ee3c:38397 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '51ca66f7ee3c,38397,1731358096931' 2024-11-11T20:48:17,214 DEBUG [RS:0;51ca66f7ee3c:38397 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-11T20:48:17,215 DEBUG 
[RS:0;51ca66f7ee3c:38397 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T20:48:17,215 DEBUG [RS:0;51ca66f7ee3c:38397 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T20:48:17,215 INFO [RS:0;51ca66f7ee3c:38397 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-11T20:48:17,215 INFO [RS:0;51ca66f7ee3c:38397 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-11T20:48:17,284 WARN [51ca66f7ee3c:40227 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-11T20:48:17,317 INFO [RS:0;51ca66f7ee3c:38397 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=51ca66f7ee3c%2C38397%2C1731358096931, suffix=, logDir=hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/WALs/51ca66f7ee3c,38397,1731358096931, archiveDir=hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/oldWALs, maxLogs=32 2024-11-11T20:48:17,318 INFO [RS:0;51ca66f7ee3c:38397 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 51ca66f7ee3c%2C38397%2C1731358096931.1731358097318 2024-11-11T20:48:17,326 INFO [RS:0;51ca66f7ee3c:38397 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/WALs/51ca66f7ee3c,38397,1731358096931/51ca66f7ee3c%2C38397%2C1731358096931.1731358097318 2024-11-11T20:48:17,331 DEBUG [RS:0;51ca66f7ee3c:38397 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41971:41971),(127.0.0.1/127.0.0.1:38871:38871)] 2024-11-11T20:48:17,535 DEBUG [51ca66f7ee3c:40227 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-11T20:48:17,536 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=51ca66f7ee3c,38397,1731358096931 2024-11-11T20:48:17,539 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 51ca66f7ee3c,38397,1731358096931, state=OPENING 2024-11-11T20:48:17,542 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-11T20:48:17,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40227-0x100308b5a4e0000, quorum=127.0.0.1:54577, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:48:17,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38397-0x100308b5a4e0001, quorum=127.0.0.1:54577, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:48:17,544 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T20:48:17,545 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=51ca66f7ee3c,38397,1731358096931}] 2024-11-11T20:48:17,545 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for 
path /hbase/meta-region-server: CHANGED 2024-11-11T20:48:17,545 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T20:48:17,698 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-11T20:48:17,701 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43653, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-11T20:48:17,704 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-11T20:48:17,704 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T20:48:17,706 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=51ca66f7ee3c%2C38397%2C1731358096931.meta, suffix=.meta, logDir=hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/WALs/51ca66f7ee3c,38397,1731358096931, archiveDir=hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/oldWALs, maxLogs=32 2024-11-11T20:48:17,707 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 51ca66f7ee3c%2C38397%2C1731358096931.meta.1731358097707.meta 2024-11-11T20:48:17,727 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/WALs/51ca66f7ee3c,38397,1731358096931/51ca66f7ee3c%2C38397%2C1731358096931.meta.1731358097707.meta 2024-11-11T20:48:17,727 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41971:41971),(127.0.0.1/127.0.0.1:38871:38871)] 2024-11-11T20:48:17,728 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-11T20:48:17,728 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-11T20:48:17,728 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-11T20:48:17,729 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
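
The wal.AbstractFSWAL(613) entries above record the WAL rolling parameters in effect for both the default and the meta WAL (blocksize=256 MB, rollsize=128 MB, maxLogs=32) and show that the FSHLog provider is used. A sketch, assuming the usual property names, of how those parameters are normally set; the roll size is the block size multiplied by the logroll multiplier (256 MB * 0.5 = 128 MB):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalRollingSketch {
    public static Configuration walTuning() {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.wal.provider", "filesystem");                          // FSHLogProvider
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // blocksize: 256 MB
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);          // rollsize = blocksize * multiplier
        conf.setInt("hbase.regionserver.maxlogs", 32);                         // maxLogs
        return conf;
    }
}
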
2024-11-11T20:48:17,729 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-11T20:48:17,729 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T20:48:17,729 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-11T20:48:17,729 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-11T20:48:17,730 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T20:48:17,731 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T20:48:17,731 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:48:17,731 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:48:17,731 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T20:48:17,732 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T20:48:17,732 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:48:17,732 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:48:17,732 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T20:48:17,733 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T20:48:17,733 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:48:17,733 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T20:48:17,733 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T20:48:17,734 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T20:48:17,734 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T20:48:17,734 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
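
Each HStore(327) line above is built from a column family descriptor of hbase:meta (ROW_INDEX_V1 data block encoding, ROWCOL bloom filter, in-memory, NONE compression, 8 KB block size, as spelled out in the region open journal further down). As a client-side illustration only, not what this test does, a similar family could be declared with the public descriptor builders; the table name "meta_like" is made up for the example.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeFamilySketch {
    public static TableDescriptor build() {
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                     // VERSIONS => '3'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING
            .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
            .setInMemory(true)                                     // IN_MEMORY => 'true'
            .setBlocksize(8 * 1024)                                // BLOCKSIZE => 8 KB
            .build();
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("meta_like"))            // hypothetical table name
            .setColumnFamily(info)
            .build();
    }
}
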
2024-11-11T20:48:17,734 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T20:48:17,735 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/data/hbase/meta/1588230740 2024-11-11T20:48:17,736 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/data/hbase/meta/1588230740 2024-11-11T20:48:17,737 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T20:48:17,737 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T20:48:17,737 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-11T20:48:17,738 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T20:48:17,739 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=746618, jitterRate=-0.05062636733055115}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T20:48:17,739 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-11T20:48:17,740 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731358097729Writing region info on filesystem at 1731358097729Initializing all the Stores at 1731358097730 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731358097730Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731358097730Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731358097730Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731358097730Cleaning up temporary data from old regions at 1731358097737 (+7 ms)Running coprocessor post-open hooks at 1731358097739 (+2 ms)Region opened successfully at 1731358097740 (+1 ms) 2024-11-11T20:48:17,741 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731358097697 2024-11-11T20:48:17,743 DEBUG [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-11T20:48:17,743 INFO [RS_OPEN_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-11T20:48:17,743 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=51ca66f7ee3c,38397,1731358096931 2024-11-11T20:48:17,744 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 51ca66f7ee3c,38397,1731358096931, state=OPEN 2024-11-11T20:48:17,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40227-0x100308b5a4e0000, quorum=127.0.0.1:54577, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T20:48:17,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38397-0x100308b5a4e0001, quorum=127.0.0.1:54577, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T20:48:17,746 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=51ca66f7ee3c,38397,1731358096931 2024-11-11T20:48:17,746 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T20:48:17,746 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T20:48:17,748 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-11T20:48:17,748 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=51ca66f7ee3c,38397,1731358096931 in 201 msec 2024-11-11T20:48:17,750 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-11T20:48:17,750 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 616 msec 2024-11-11T20:48:17,751 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T20:48:17,751 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-11T20:48:17,752 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T20:48:17,752 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=51ca66f7ee3c,38397,1731358096931, seqNum=-1] 2024-11-11T20:48:17,752 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T20:48:17,754 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51237, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T20:48:17,758 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 676 msec 2024-11-11T20:48:17,758 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731358097758, completionTime=-1 2024-11-11T20:48:17,758 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-11T20:48:17,758 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-11T20:48:17,760 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-11T20:48:17,760 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731358157760 2024-11-11T20:48:17,760 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731358217760 2024-11-11T20:48:17,760 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-11T20:48:17,760 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,40227,1731358096879-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T20:48:17,760 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,40227,1731358096879-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T20:48:17,760 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,40227,1731358096879-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T20:48:17,760 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-51ca66f7ee3c:40227, period=300000, unit=MILLISECONDS is enabled. 
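
InitMetaProcedure's final step above creates the 'default' and 'hbase' namespaces inside the master. For comparison, a client creates additional namespaces through the Admin API; a minimal sketch follows (connection setup omitted, "demo_ns" is a made-up name, and this is not the internal path InitMetaProcedure takes).

import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public class NamespaceSketch {
    // Creates a namespace the way a client would; the master creates 'default' and 'hbase'
    // itself during InitMetaProcedure, as logged above.
    public static void createDemoNamespace(Connection connection) throws java.io.IOException {
        try (Admin admin = connection.getAdmin()) {
            admin.createNamespace(NamespaceDescriptor.create("demo_ns").build()); // hypothetical name
        }
    }
}
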
2024-11-11T20:48:17,760 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-11T20:48:17,760 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-11T20:48:17,762 DEBUG [master/51ca66f7ee3c:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-11T20:48:17,764 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.804sec 2024-11-11T20:48:17,764 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-11T20:48:17,764 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-11T20:48:17,764 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-11T20:48:17,764 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-11T20:48:17,764 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-11T20:48:17,764 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,40227,1731358096879-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T20:48:17,764 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,40227,1731358096879-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-11T20:48:17,767 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-11T20:48:17,767 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-11T20:48:17,767 INFO [master/51ca66f7ee3c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=51ca66f7ee3c,40227,1731358096879-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
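
The entries that follow show the Time-limited test thread opening a client connection via the connection registry: it fetches the cluster id (a6a7b8ae-...) from the master and then the hbase:meta region location. A rough sketch of the equivalent public-API steps, assuming a ZooKeeper-based client configuration pointed at the quorum and base znode shown in this log (the running test actually resolves the registry through the master endpoint):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ClientConnectSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");           // quorum host from the log
        conf.setInt("hbase.zookeeper.property.clientPort", 54577); // client port from the log
        conf.set("zookeeper.znode.parent", "/hbase");              // baseZNode from the log
        try (Connection connection = ConnectionFactory.createConnection(conf);
             RegionLocator locator = connection.getRegionLocator(TableName.META_TABLE_NAME)) {
            // Mirrors the "fetched meta region location" lines: ask which server hosts hbase:meta.
            System.out.println(locator.getRegionLocation(new byte[0]));
        }
    }
}
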
2024-11-11T20:48:17,848 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@477d322e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T20:48:17,848 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 51ca66f7ee3c,40227,-1 for getting cluster id 2024-11-11T20:48:17,848 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-11T20:48:17,850 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a6a7b8ae-674b-4568-a8bc-7c60a0cc6272' 2024-11-11T20:48:17,850 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-11T20:48:17,850 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a6a7b8ae-674b-4568-a8bc-7c60a0cc6272" 2024-11-11T20:48:17,850 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c25219, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T20:48:17,850 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [51ca66f7ee3c,40227,-1] 2024-11-11T20:48:17,851 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-11T20:48:17,851 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T20:48:17,852 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37922, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-11T20:48:17,852 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@403020f8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T20:48:17,853 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T20:48:17,853 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=51ca66f7ee3c,38397,1731358096931, seqNum=-1] 2024-11-11T20:48:17,854 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T20:48:17,855 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33632, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T20:48:17,857 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=51ca66f7ee3c,40227,1731358096879 2024-11-11T20:48:17,857 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T20:48:17,859 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-11T20:48:17,860 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T20:48:17,862 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/WALs/test.com,8080,1, archiveDir=hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/oldWALs, maxLogs=32 2024-11-11T20:48:17,862 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731358097862 2024-11-11T20:48:17,871 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/WALs/test.com,8080,1/test.com%2C8080%2C1.1731358097862 2024-11-11T20:48:17,872 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38871:38871),(127.0.0.1/127.0.0.1:41971:41971)] 2024-11-11T20:48:17,873 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731358097873 2024-11-11T20:48:17,886 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:48:17,887 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:48:17,887 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:48:17,887 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:48:17,887 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:48:17,887 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/WALs/test.com,8080,1/test.com%2C8080%2C1.1731358097862 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/WALs/test.com,8080,1/test.com%2C8080%2C1.1731358097873 2024-11-11T20:48:17,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34875 is added to blk_1073741835_1011 (size=93) 2024-11-11T20:48:17,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36347 is added to blk_1073741835_1011 (size=93) 2024-11-11T20:48:17,912 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38871:38871),(127.0.0.1/127.0.0.1:41971:41971)] 2024-11-11T20:48:17,913 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/WALs/test.com,8080,1/test.com%2C8080%2C1.1731358097862 to hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/oldWALs/test.com%2C8080%2C1.1731358097862 2024-11-11T20:48:17,917 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:48:17,917 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:48:17,917 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:48:17,917 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:48:17,917 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:48:17,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:36347 is added to blk_1073741836_1012 (size=93) 2024-11-11T20:48:17,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34875 is added to blk_1073741836_1012 (size=93) 2024-11-11T20:48:17,927 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/oldWALs 2024-11-11T20:48:17,927 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1731358097873) 2024-11-11T20:48:17,927 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-11T20:48:17,927 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-11T20:48:17,927 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T20:48:17,927 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T20:48:17,927 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T20:48:17,928 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-11T20:48:17,928 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-11T20:48:17,928 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=948566823, stopped=false 2024-11-11T20:48:17,928 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=51ca66f7ee3c,40227,1731358096879 2024-11-11T20:48:17,929 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38397-0x100308b5a4e0001, quorum=127.0.0.1:54577, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T20:48:17,929 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40227-0x100308b5a4e0000, quorum=127.0.0.1:54577, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T20:48:17,929 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38397-0x100308b5a4e0001, quorum=127.0.0.1:54577, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:48:17,929 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40227-0x100308b5a4e0000, quorum=127.0.0.1:54577, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:48:17,929 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T20:48:17,930 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
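
The call stacks above and below originate in AbstractTestLogRolling.tearDown, which shuts the minicluster down through HBaseTestingUtil.shutdownMiniCluster and triggers the master and region server shutdown sequence that follows. A bare-bones sketch of that test lifecycle; the class name is illustrative, and startMiniCluster() is assumed to behave as in the older HBaseTestingUtility, while shutdownMiniCluster() is the method named in the stack trace above.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;

public class MiniClusterLifecycleSketch {
    private final HBaseTestingUtil util = new HBaseTestingUtil();

    @Before
    public void setUp() throws Exception {
        util.startMiniCluster();    // brings up ZK, HDFS, one master and one region server by default
    }

    @After
    public void tearDown() throws Exception {
        util.shutdownMiniCluster(); // produces a shutdown sequence like the one in this log
    }
}
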
2024-11-11T20:48:17,930 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T20:48:17,930 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T20:48:17,930 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '51ca66f7ee3c,38397,1731358096931' ***** 2024-11-11T20:48:17,930 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-11T20:48:17,930 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38397-0x100308b5a4e0001, quorum=127.0.0.1:54577, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T20:48:17,931 INFO [RS:0;51ca66f7ee3c:38397 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T20:48:17,931 INFO [RS:0;51ca66f7ee3c:38397 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-11T20:48:17,931 INFO [RS:0;51ca66f7ee3c:38397 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-11T20:48:17,931 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-11T20:48:17,931 INFO [RS:0;51ca66f7ee3c:38397 {}] regionserver.HRegionServer(959): stopping server 51ca66f7ee3c,38397,1731358096931 2024-11-11T20:48:17,931 INFO [RS:0;51ca66f7ee3c:38397 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T20:48:17,931 INFO [RS:0;51ca66f7ee3c:38397 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;51ca66f7ee3c:38397. 2024-11-11T20:48:17,931 DEBUG [RS:0;51ca66f7ee3c:38397 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T20:48:17,931 DEBUG [RS:0;51ca66f7ee3c:38397 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T20:48:17,931 INFO [RS:0;51ca66f7ee3c:38397 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-11T20:48:17,931 INFO [RS:0;51ca66f7ee3c:38397 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-11T20:48:17,931 INFO [RS:0;51ca66f7ee3c:38397 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-11T20:48:17,931 INFO [RS:0;51ca66f7ee3c:38397 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-11T20:48:17,932 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40227-0x100308b5a4e0000, quorum=127.0.0.1:54577, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T20:48:17,932 INFO [RS:0;51ca66f7ee3c:38397 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-11T20:48:17,932 DEBUG [RS:0;51ca66f7ee3c:38397 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-11T20:48:17,932 DEBUG [RS:0;51ca66f7ee3c:38397 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-11T20:48:17,932 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T20:48:17,932 INFO [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T20:48:17,933 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T20:48:17,933 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T20:48:17,933 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T20:48:17,933 INFO [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-11T20:48:17,964 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/data/hbase/meta/1588230740/.tmp/ns/0ee71bb543e0419aab42d337a6013738 is 43, key is default/ns:d/1731358097754/Put/seqid=0 2024-11-11T20:48:17,964 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,46139,1731357917854/51ca66f7ee3c%2C46139%2C1731357917854.1731357918051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T20:48:17,964 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46503/user/jenkins/test-data/a0bd8969-b937-d2c8-8283-0450e9777411/WALs/51ca66f7ee3c,38547,1731357916578/51ca66f7ee3c%2C38547%2C1731357916578.meta.1731357917676.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T20:48:17,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34875 is added to blk_1073741837_1013 (size=5153) 2024-11-11T20:48:17,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36347 is added to blk_1073741837_1013 (size=5153) 2024-11-11T20:48:17,972 INFO [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/data/hbase/meta/1588230740/.tmp/ns/0ee71bb543e0419aab42d337a6013738 2024-11-11T20:48:17,978 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/data/hbase/meta/1588230740/.tmp/ns/0ee71bb543e0419aab42d337a6013738 as hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/data/hbase/meta/1588230740/ns/0ee71bb543e0419aab42d337a6013738 2024-11-11T20:48:17,986 INFO [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/data/hbase/meta/1588230740/ns/0ee71bb543e0419aab42d337a6013738, entries=2, sequenceid=6, filesize=5.0 K 2024-11-11T20:48:17,987 INFO [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 54ms, sequenceid=6, compaction requested=false 2024-11-11T20:48:17,987 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-11T20:48:17,994 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-11T20:48:17,995 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T20:48:17,995 INFO [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T20:48:17,995 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731358097932Running coprocessor pre-close hooks at 1731358097932Disabling compacts and flushes for region at 1731358097932Disabling writes for close at 1731358097933 (+1 ms)Obtaining lock to block concurrent updates at 1731358097933Preparing flush snapshotting stores in 1588230740 at 1731358097933Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731358097933Flushing stores of hbase:meta,,1.1588230740 at 1731358097935 (+2 ms)Flushing 1588230740/ns: creating writer at 1731358097935Flushing 1588230740/ns: appending metadata at 1731358097963 (+28 ms)Flushing 
1588230740/ns: closing flushed file at 1731358097963Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@bc6ed3e: reopening flushed file at 1731358097977 (+14 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 54ms, sequenceid=6, compaction requested=false at 1731358097987 (+10 ms)Writing region close event to WAL at 1731358097989 (+2 ms)Running coprocessor post-close hooks at 1731358097995 (+6 ms)Closed at 1731358097995 2024-11-11T20:48:17,995 DEBUG [RS_CLOSE_META-regionserver/51ca66f7ee3c:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-11T20:48:18,132 INFO [RS:0;51ca66f7ee3c:38397 {}] regionserver.HRegionServer(976): stopping server 51ca66f7ee3c,38397,1731358096931; all regions closed. 2024-11-11T20:48:18,133 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:48:18,133 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:48:18,133 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:48:18,133 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:48:18,133 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:48:18,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34875 is added to blk_1073741834_1010 (size=1152) 2024-11-11T20:48:18,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36347 is added to blk_1073741834_1010 (size=1152) 2024-11-11T20:48:18,140 DEBUG [RS:0;51ca66f7ee3c:38397 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/oldWALs 2024-11-11T20:48:18,140 INFO [RS:0;51ca66f7ee3c:38397 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 51ca66f7ee3c%2C38397%2C1731358096931.meta:.meta(num 1731358097707) 2024-11-11T20:48:18,141 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:48:18,141 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:48:18,141 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:48:18,141 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:48:18,141 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:48:18,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34875 is added to blk_1073741833_1009 (size=93) 2024-11-11T20:48:18,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36347 is added to blk_1073741833_1009 (size=93) 2024-11-11T20:48:18,146 DEBUG [RS:0;51ca66f7ee3c:38397 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/oldWALs 2024-11-11T20:48:18,146 INFO [RS:0;51ca66f7ee3c:38397 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 51ca66f7ee3c%2C38397%2C1731358096931:(num 1731358097318) 2024-11-11T20:48:18,146 DEBUG [RS:0;51ca66f7ee3c:38397 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T20:48:18,146 INFO [RS:0;51ca66f7ee3c:38397 {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T20:48:18,146 INFO [RS:0;51ca66f7ee3c:38397 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T20:48:18,146 INFO [RS:0;51ca66f7ee3c:38397 {}] hbase.ChoreService(370): Chore service for: regionserver/51ca66f7ee3c:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, 
ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-11T20:48:18,147 INFO [RS:0;51ca66f7ee3c:38397 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T20:48:18,147 INFO [regionserver/51ca66f7ee3c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-11T20:48:18,147 INFO [RS:0;51ca66f7ee3c:38397 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38397 2024-11-11T20:48:18,148 INFO [RS:0;51ca66f7ee3c:38397 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T20:48:18,148 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38397-0x100308b5a4e0001, quorum=127.0.0.1:54577, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/51ca66f7ee3c,38397,1731358096931 2024-11-11T20:48:18,148 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40227-0x100308b5a4e0000, quorum=127.0.0.1:54577, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T20:48:18,149 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [51ca66f7ee3c,38397,1731358096931] 2024-11-11T20:48:18,149 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/51ca66f7ee3c,38397,1731358096931 already deleted, retry=false 2024-11-11T20:48:18,149 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 51ca66f7ee3c,38397,1731358096931 expired; onlineServers=0 2024-11-11T20:48:18,149 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '51ca66f7ee3c,40227,1731358096879' ***** 2024-11-11T20:48:18,149 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-11T20:48:18,149 INFO [M:0;51ca66f7ee3c:40227 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T20:48:18,149 INFO [M:0;51ca66f7ee3c:40227 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T20:48:18,149 DEBUG [M:0;51ca66f7ee3c:40227 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-11T20:48:18,150 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-11T20:48:18,150 DEBUG [M:0;51ca66f7ee3c:40227 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-11T20:48:18,150 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster-HFileCleaner.large.0-1731358097099 {}] cleaner.HFileCleaner(306): Exit Thread[master/51ca66f7ee3c:0:becomeActiveMaster-HFileCleaner.large.0-1731358097099,5,FailOnTimeoutGroup] 2024-11-11T20:48:18,150 DEBUG [master/51ca66f7ee3c:0:becomeActiveMaster-HFileCleaner.small.0-1731358097101 {}] cleaner.HFileCleaner(306): Exit Thread[master/51ca66f7ee3c:0:becomeActiveMaster-HFileCleaner.small.0-1731358097101,5,FailOnTimeoutGroup] 2024-11-11T20:48:18,150 INFO [M:0;51ca66f7ee3c:40227 {}] hbase.ChoreService(370): Chore service for: master/51ca66f7ee3c:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-11T20:48:18,150 INFO [M:0;51ca66f7ee3c:40227 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T20:48:18,150 DEBUG [M:0;51ca66f7ee3c:40227 {}] master.HMaster(1795): Stopping service threads 2024-11-11T20:48:18,150 INFO [M:0;51ca66f7ee3c:40227 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-11T20:48:18,150 INFO [M:0;51ca66f7ee3c:40227 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T20:48:18,150 INFO [M:0;51ca66f7ee3c:40227 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-11T20:48:18,150 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-11T20:48:18,150 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40227-0x100308b5a4e0000, quorum=127.0.0.1:54577, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-11T20:48:18,150 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40227-0x100308b5a4e0000, quorum=127.0.0.1:54577, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T20:48:18,151 DEBUG [M:0;51ca66f7ee3c:40227 {}] zookeeper.ZKUtil(347): master:40227-0x100308b5a4e0000, quorum=127.0.0.1:54577, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-11T20:48:18,151 WARN [M:0;51ca66f7ee3c:40227 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-11T20:48:18,151 INFO [M:0;51ca66f7ee3c:40227 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/.lastflushedseqids 2024-11-11T20:48:18,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36347 is added to blk_1073741838_1014 (size=99) 2024-11-11T20:48:18,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34875 is added to blk_1073741838_1014 (size=99) 2024-11-11T20:48:18,156 INFO [M:0;51ca66f7ee3c:40227 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-11T20:48:18,157 INFO [M:0;51ca66f7ee3c:40227 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-11T20:48:18,157 DEBUG [M:0;51ca66f7ee3c:40227 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T20:48:18,157 INFO [M:0;51ca66f7ee3c:40227 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T20:48:18,157 DEBUG [M:0;51ca66f7ee3c:40227 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T20:48:18,157 DEBUG [M:0;51ca66f7ee3c:40227 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T20:48:18,157 DEBUG [M:0;51ca66f7ee3c:40227 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T20:48:18,157 INFO [M:0;51ca66f7ee3c:40227 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-11T20:48:18,173 DEBUG [M:0;51ca66f7ee3c:40227 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/69e5a01522844814bf3465f0ff1333ab is 82, key is hbase:meta,,1/info:regioninfo/1731358097743/Put/seqid=0 2024-11-11T20:48:18,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36347 is added to blk_1073741839_1015 (size=5672) 2024-11-11T20:48:18,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34875 is added to blk_1073741839_1015 (size=5672) 2024-11-11T20:48:18,178 INFO [M:0;51ca66f7ee3c:40227 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/69e5a01522844814bf3465f0ff1333ab 2024-11-11T20:48:18,197 DEBUG [M:0;51ca66f7ee3c:40227 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/74a6d2e687b7427aaf51180126b0757b is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731358097757/Put/seqid=0 2024-11-11T20:48:18,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36347 is added to blk_1073741840_1016 (size=5275) 2024-11-11T20:48:18,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34875 is added to blk_1073741840_1016 (size=5275) 2024-11-11T20:48:18,201 INFO [M:0;51ca66f7ee3c:40227 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/74a6d2e687b7427aaf51180126b0757b 2024-11-11T20:48:18,218 DEBUG [M:0;51ca66f7ee3c:40227 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7376b050ee954205bc3409e650f4c170 is 69, key is 51ca66f7ee3c,38397,1731358096931/rs:state/1731358097166/Put/seqid=0 2024-11-11T20:48:18,222 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34875 is added to blk_1073741841_1017 (size=5156) 2024-11-11T20:48:18,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36347 is added to blk_1073741841_1017 (size=5156) 2024-11-11T20:48:18,222 INFO [M:0;51ca66f7ee3c:40227 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7376b050ee954205bc3409e650f4c170 2024-11-11T20:48:18,239 DEBUG [M:0;51ca66f7ee3c:40227 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b4f4b3d195e14c26aa69df970fee99c2 is 52, key is load_balancer_on/state:d/1731358097858/Put/seqid=0 2024-11-11T20:48:18,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36347 is added to blk_1073741842_1018 (size=5056) 2024-11-11T20:48:18,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34875 is added to blk_1073741842_1018 (size=5056) 2024-11-11T20:48:18,244 INFO [M:0;51ca66f7ee3c:40227 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b4f4b3d195e14c26aa69df970fee99c2 2024-11-11T20:48:18,249 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38397-0x100308b5a4e0001, quorum=127.0.0.1:54577, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T20:48:18,249 INFO [RS:0;51ca66f7ee3c:38397 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T20:48:18,249 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38397-0x100308b5a4e0001, quorum=127.0.0.1:54577, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T20:48:18,249 INFO [RS:0;51ca66f7ee3c:38397 {}] regionserver.HRegionServer(1031): Exiting; stopping=51ca66f7ee3c,38397,1731358096931; zookeeper connection closed. 
2024-11-11T20:48:18,249 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1f61f5b {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1f61f5b 2024-11-11T20:48:18,249 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-11T20:48:18,250 DEBUG [M:0;51ca66f7ee3c:40227 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/69e5a01522844814bf3465f0ff1333ab as hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/69e5a01522844814bf3465f0ff1333ab 2024-11-11T20:48:18,255 INFO [M:0;51ca66f7ee3c:40227 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/69e5a01522844814bf3465f0ff1333ab, entries=8, sequenceid=29, filesize=5.5 K 2024-11-11T20:48:18,256 DEBUG [M:0;51ca66f7ee3c:40227 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/74a6d2e687b7427aaf51180126b0757b as hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/74a6d2e687b7427aaf51180126b0757b 2024-11-11T20:48:18,260 INFO [M:0;51ca66f7ee3c:40227 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/74a6d2e687b7427aaf51180126b0757b, entries=3, sequenceid=29, filesize=5.2 K 2024-11-11T20:48:18,261 DEBUG [M:0;51ca66f7ee3c:40227 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7376b050ee954205bc3409e650f4c170 as hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7376b050ee954205bc3409e650f4c170 2024-11-11T20:48:18,266 INFO [M:0;51ca66f7ee3c:40227 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7376b050ee954205bc3409e650f4c170, entries=1, sequenceid=29, filesize=5.0 K 2024-11-11T20:48:18,267 DEBUG [M:0;51ca66f7ee3c:40227 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b4f4b3d195e14c26aa69df970fee99c2 as hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/b4f4b3d195e14c26aa69df970fee99c2 2024-11-11T20:48:18,271 INFO [M:0;51ca66f7ee3c:40227 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:37791/user/jenkins/test-data/53a49741-077a-8839-5489-5e66b0ae7c1c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/b4f4b3d195e14c26aa69df970fee99c2, entries=1, sequenceid=29, filesize=4.9 K 2024-11-11T20:48:18,272 INFO [M:0;51ca66f7ee3c:40227 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 115ms, sequenceid=29, compaction requested=false 2024-11-11T20:48:18,274 INFO [M:0;51ca66f7ee3c:40227 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T20:48:18,274 DEBUG [M:0;51ca66f7ee3c:40227 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731358098157Disabling compacts and flushes for region at 1731358098157Disabling writes for close at 1731358098157Obtaining lock to block concurrent updates at 1731358098157Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731358098157Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731358098157Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731358098158 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731358098158Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731358098173 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731358098173Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731358098182 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731358098196 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731358098196Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731358098205 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731358098218 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731358098218Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731358098226 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731358098239 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731358098239Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@22549c9f: reopening flushed file at 1731358098249 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@784c43f5: reopening flushed file at 1731358098255 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@de9390b: reopening flushed file at 1731358098261 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7b958437: reopening flushed file at 1731358098266 (+5 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 115ms, sequenceid=29, compaction requested=false at 1731358098272 (+6 ms)Writing region close event to WAL at 1731358098273 (+1 ms)Closed at 1731358098273 2024-11-11T20:48:18,274 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:48:18,274 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:48:18,274 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:48:18,274 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:48:18,274 INFO 
[sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T20:48:18,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34875 is added to blk_1073741830_1006 (size=10311) 2024-11-11T20:48:18,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36347 is added to blk_1073741830_1006 (size=10311) 2024-11-11T20:48:18,677 INFO [M:0;51ca66f7ee3c:40227 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-11T20:48:18,677 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-11T20:48:18,678 INFO [M:0;51ca66f7ee3c:40227 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40227 2024-11-11T20:48:18,678 INFO [M:0;51ca66f7ee3c:40227 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T20:48:18,779 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40227-0x100308b5a4e0000, quorum=127.0.0.1:54577, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T20:48:18,779 INFO [M:0;51ca66f7ee3c:40227 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T20:48:18,779 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40227-0x100308b5a4e0000, quorum=127.0.0.1:54577, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T20:48:18,782 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@87b2e2b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T20:48:18,782 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@380ffe40{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T20:48:18,783 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T20:48:18,783 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7748f5df{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T20:48:18,783 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@814e400{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cbe39618-7fdf-65db-ec64-a9da2d13b399/hadoop.log.dir/,STOPPED} 2024-11-11T20:48:18,786 WARN [BP-1891497342-172.17.0.2-1731358096295 heartbeating to localhost/127.0.0.1:37791 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T20:48:18,786 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T20:48:18,786 WARN [BP-1891497342-172.17.0.2-1731358096295 heartbeating to localhost/127.0.0.1:37791 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1891497342-172.17.0.2-1731358096295 (Datanode Uuid c592c507-930f-44ba-a698-c0b40ead0a23) service to localhost/127.0.0.1:37791 2024-11-11T20:48:18,786 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T20:48:18,787 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cbe39618-7fdf-65db-ec64-a9da2d13b399/cluster_6b0c2be6-ac1f-6e04-d925-cfa0089605e2/data/data3/current/BP-1891497342-172.17.0.2-1731358096295 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T20:48:18,787 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T20:48:18,787 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cbe39618-7fdf-65db-ec64-a9da2d13b399/cluster_6b0c2be6-ac1f-6e04-d925-cfa0089605e2/data/data4/current/BP-1891497342-172.17.0.2-1731358096295 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T20:48:18,798 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@79ca80d8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T20:48:18,798 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@150dab73{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T20:48:18,798 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T20:48:18,799 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@e32ebb8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T20:48:18,799 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6c8a9439{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cbe39618-7fdf-65db-ec64-a9da2d13b399/hadoop.log.dir/,STOPPED} 2024-11-11T20:48:18,800 WARN [BP-1891497342-172.17.0.2-1731358096295 heartbeating to localhost/127.0.0.1:37791 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T20:48:18,800 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T20:48:18,800 WARN [BP-1891497342-172.17.0.2-1731358096295 heartbeating to localhost/127.0.0.1:37791 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1891497342-172.17.0.2-1731358096295 (Datanode Uuid afd15729-9f3f-44bb-ac87-43060a8bfa70) service to localhost/127.0.0.1:37791 2024-11-11T20:48:18,800 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T20:48:18,801 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cbe39618-7fdf-65db-ec64-a9da2d13b399/cluster_6b0c2be6-ac1f-6e04-d925-cfa0089605e2/data/data1/current/BP-1891497342-172.17.0.2-1731358096295 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T20:48:18,801 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cbe39618-7fdf-65db-ec64-a9da2d13b399/cluster_6b0c2be6-ac1f-6e04-d925-cfa0089605e2/data/data2/current/BP-1891497342-172.17.0.2-1731358096295 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T20:48:18,801 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T20:48:18,807 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@54644e01{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T20:48:18,807 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@134d2ab8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T20:48:18,807 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T20:48:18,807 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@512e80eb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T20:48:18,807 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@32d01bcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cbe39618-7fdf-65db-ec64-a9da2d13b399/hadoop.log.dir/,STOPPED} 2024-11-11T20:48:18,820 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-11T20:48:18,842 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-11T20:48:18,854 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=269 (was 230) Potentially hanging thread: nioEventLoopGroup-44-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37791 from jenkins.hfs.7 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-16-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37791 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-42-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:37791 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37791 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37791 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37791 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37791 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-2 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: globalEventExecutor-1-20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//io.netty.util.concurrent.GlobalEventExecutor.takeTask(GlobalEventExecutor.java:113) app//io.netty.util.concurrent.GlobalEventExecutor$TaskRunner.run(GlobalEventExecutor.java:259) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:37791 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=537 (was 512) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=252 (was 252), ProcessCount=11 (was 11), AvailableMemoryMB=3903 (was 3929)