2024-11-15 11:44:45,102 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-15 11:44:45,117 main DEBUG Took 0.012858 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-15 11:44:45,118 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-15 11:44:45,118 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-15 11:44:45,120 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-15 11:44:45,122 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-15 11:44:45,132 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-15 11:44:45,149 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 11:44:45,151 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-15 11:44:45,152 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 11:44:45,152 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-15 11:44:45,153 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 11:44:45,154 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-15 11:44:45,155 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 11:44:45,155 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-15 11:44:45,156 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 11:44:45,156 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-15 11:44:45,158 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 11:44:45,158 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-15 11:44:45,159 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 11:44:45,159 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-15 11:44:45,160 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 11:44:45,160 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-15 11:44:45,161 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 11:44:45,162 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-15 11:44:45,162 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 11:44:45,163 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-15 11:44:45,163 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 11:44:45,164 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-15 11:44:45,164 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 11:44:45,165 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-15 11:44:45,165 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 11:44:45,166 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-15 11:44:45,168 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 11:44:45,170 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-15 11:44:45,173 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-15 11:44:45,173 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-15 11:44:45,175 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-15 11:44:45,176 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-15 11:44:45,188 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-15 11:44:45,192 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-15 11:44:45,195 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-15 11:44:45,195 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-15 11:44:45,196 main DEBUG createAppenders(={Console}) 2024-11-15 11:44:45,197 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-15 11:44:45,197 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-15 11:44:45,198 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-15 11:44:45,199 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-15 11:44:45,199 main DEBUG OutputStream closed 2024-11-15 11:44:45,200 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-15 11:44:45,200 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-15 11:44:45,201 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-15 11:44:45,300 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-15 11:44:45,303 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-15 11:44:45,305 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-15 11:44:45,307 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-15 11:44:45,308 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-15 11:44:45,308 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-15 11:44:45,309 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-15 11:44:45,309 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-15 11:44:45,309 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-15 11:44:45,310 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-15 11:44:45,310 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-15 11:44:45,311 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-15 11:44:45,311 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-15 11:44:45,312 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-15 11:44:45,312 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-15 11:44:45,312 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-15 11:44:45,313 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-15 11:44:45,314 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-15 11:44:45,317 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-15 11:44:45,318 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-15 11:44:45,318 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-15 11:44:45,319 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-15T11:44:45,631 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38013aa2-541c-af28-f77b-965bd7a3cc8a 2024-11-15 11:44:45,635 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-15 11:44:45,635 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-15T11:44:45,646 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-15T11:44:45,690 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=205, ProcessCount=11, AvailableMemoryMB=12008 2024-11-15T11:44:45,694 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-15T11:44:45,717 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38013aa2-541c-af28-f77b-965bd7a3cc8a/cluster_1240ca5f-19ad-af6e-66ec-b0f14fcc9bba, deleteOnExit=true 2024-11-15T11:44:45,717 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-15T11:44:45,718 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38013aa2-541c-af28-f77b-965bd7a3cc8a/test.cache.data in system properties and HBase conf 2024-11-15T11:44:45,719 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38013aa2-541c-af28-f77b-965bd7a3cc8a/hadoop.tmp.dir in system properties and HBase conf 2024-11-15T11:44:45,720 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38013aa2-541c-af28-f77b-965bd7a3cc8a/hadoop.log.dir in system properties and HBase conf 2024-11-15T11:44:45,721 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38013aa2-541c-af28-f77b-965bd7a3cc8a/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-15T11:44:45,722 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38013aa2-541c-af28-f77b-965bd7a3cc8a/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-15T11:44:45,722 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-15T11:44:45,822 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-15T11:44:45,925 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-15T11:44:45,930 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38013aa2-541c-af28-f77b-965bd7a3cc8a/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-15T11:44:45,931 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38013aa2-541c-af28-f77b-965bd7a3cc8a/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-15T11:44:45,931 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38013aa2-541c-af28-f77b-965bd7a3cc8a/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-15T11:44:45,932 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38013aa2-541c-af28-f77b-965bd7a3cc8a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T11:44:45,933 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38013aa2-541c-af28-f77b-965bd7a3cc8a/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-15T11:44:45,933 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38013aa2-541c-af28-f77b-965bd7a3cc8a/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-15T11:44:45,934 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38013aa2-541c-af28-f77b-965bd7a3cc8a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T11:44:45,934 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38013aa2-541c-af28-f77b-965bd7a3cc8a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T11:44:45,935 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38013aa2-541c-af28-f77b-965bd7a3cc8a/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-15T11:44:45,936 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38013aa2-541c-af28-f77b-965bd7a3cc8a/nfs.dump.dir in system properties and HBase conf 2024-11-15T11:44:45,936 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38013aa2-541c-af28-f77b-965bd7a3cc8a/java.io.tmpdir in system properties and HBase conf 2024-11-15T11:44:45,937 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38013aa2-541c-af28-f77b-965bd7a3cc8a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T11:44:45,937 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38013aa2-541c-af28-f77b-965bd7a3cc8a/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-15T11:44:45,938 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38013aa2-541c-af28-f77b-965bd7a3cc8a/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-15T11:44:46,489 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-15T11:44:47,074 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-15T11:44:47,159 INFO [Time-limited test {}] log.Log(170): Logging initialized @2912ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-15T11:44:47,238 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T11:44:47,309 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T11:44:47,338 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T11:44:47,338 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T11:44:47,340 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T11:44:47,355 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T11:44:47,358 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38013aa2-541c-af28-f77b-965bd7a3cc8a/hadoop.log.dir/,AVAILABLE} 2024-11-15T11:44:47,359 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T11:44:47,568 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38013aa2-541c-af28-f77b-965bd7a3cc8a/java.io.tmpdir/jetty-localhost-35821-hadoop-hdfs-3_4_1-tests_jar-_-any-17650940592584053627/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T11:44:47,575 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:35821} 2024-11-15T11:44:47,575 INFO [Time-limited test {}] server.Server(415): Started @3329ms 2024-11-15T11:44:47,608 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-15T11:44:48,119 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T11:44:48,128 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T11:44:48,129 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T11:44:48,129 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T11:44:48,130 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T11:44:48,130 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38013aa2-541c-af28-f77b-965bd7a3cc8a/hadoop.log.dir/,AVAILABLE} 2024-11-15T11:44:48,131 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T11:44:48,245 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b07d1ba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38013aa2-541c-af28-f77b-965bd7a3cc8a/java.io.tmpdir/jetty-localhost-33807-hadoop-hdfs-3_4_1-tests_jar-_-any-3623981471111683694/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T11:44:48,246 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:33807} 2024-11-15T11:44:48,246 INFO [Time-limited test {}] server.Server(415): Started @4000ms 2024-11-15T11:44:48,300 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T11:44:48,411 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T11:44:48,419 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T11:44:48,428 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T11:44:48,428 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T11:44:48,429 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T11:44:48,430 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38013aa2-541c-af28-f77b-965bd7a3cc8a/hadoop.log.dir/,AVAILABLE} 2024-11-15T11:44:48,430 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T11:44:48,570 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bf97579{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38013aa2-541c-af28-f77b-965bd7a3cc8a/java.io.tmpdir/jetty-localhost-33789-hadoop-hdfs-3_4_1-tests_jar-_-any-3751501186716116701/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T11:44:48,571 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:33789} 2024-11-15T11:44:48,571 INFO [Time-limited test {}] server.Server(415): Started @4325ms 2024-11-15T11:44:48,574 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-15T11:44:49,488 WARN [Thread-100 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38013aa2-541c-af28-f77b-965bd7a3cc8a/cluster_1240ca5f-19ad-af6e-66ec-b0f14fcc9bba/data/data1/current/BP-1474210229-172.17.0.2-1731671086590/current, will proceed with Du for space computation calculation, 2024-11-15T11:44:49,489 WARN [Thread-99 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38013aa2-541c-af28-f77b-965bd7a3cc8a/cluster_1240ca5f-19ad-af6e-66ec-b0f14fcc9bba/data/data4/current/BP-1474210229-172.17.0.2-1731671086590/current, will proceed with Du for space computation calculation, 2024-11-15T11:44:49,489 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38013aa2-541c-af28-f77b-965bd7a3cc8a/cluster_1240ca5f-19ad-af6e-66ec-b0f14fcc9bba/data/data3/current/BP-1474210229-172.17.0.2-1731671086590/current, will proceed with Du for space computation calculation, 2024-11-15T11:44:49,489 WARN [Thread-101 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38013aa2-541c-af28-f77b-965bd7a3cc8a/cluster_1240ca5f-19ad-af6e-66ec-b0f14fcc9bba/data/data2/current/BP-1474210229-172.17.0.2-1731671086590/current, will proceed with Du for space computation calculation, 2024-11-15T11:44:49,524 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-15T11:44:49,527 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-15T11:44:49,586 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd5ca657784da03c1 with lease ID 0xe89f765bce9045b5: Processing first storage report for DS-505d59eb-50ee-40e2-ba01-c9456fc0b4bf from datanode DatanodeRegistration(127.0.0.1:45289, datanodeUuid=86cb6d34-33c4-4994-b4f5-094970d66c80, infoPort=43409, infoSecurePort=0, ipcPort=45331, storageInfo=lv=-57;cid=testClusterID;nsid=1798293373;c=1731671086590) 2024-11-15T11:44:49,587 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd5ca657784da03c1 with lease ID 0xe89f765bce9045b5: from storage DS-505d59eb-50ee-40e2-ba01-c9456fc0b4bf node DatanodeRegistration(127.0.0.1:45289, datanodeUuid=86cb6d34-33c4-4994-b4f5-094970d66c80, infoPort=43409, infoSecurePort=0, ipcPort=45331, storageInfo=lv=-57;cid=testClusterID;nsid=1798293373;c=1731671086590), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-15T11:44:49,588 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x78a4372098e5bbfe with lease ID 0xe89f765bce9045b4: Processing first storage report for DS-10225212-64ae-479a-8297-cbe5feae3f81 from datanode DatanodeRegistration(127.0.0.1:41851, datanodeUuid=30a7de63-de4a-4d6e-a559-3261a411aa0c, infoPort=44421, infoSecurePort=0, ipcPort=43811, storageInfo=lv=-57;cid=testClusterID;nsid=1798293373;c=1731671086590) 2024-11-15T11:44:49,588 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x78a4372098e5bbfe with lease ID 0xe89f765bce9045b4: from storage DS-10225212-64ae-479a-8297-cbe5feae3f81 node DatanodeRegistration(127.0.0.1:41851, datanodeUuid=30a7de63-de4a-4d6e-a559-3261a411aa0c, infoPort=44421, infoSecurePort=0, ipcPort=43811, storageInfo=lv=-57;cid=testClusterID;nsid=1798293373;c=1731671086590), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T11:44:49,589 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd5ca657784da03c1 with lease ID 0xe89f765bce9045b5: Processing first storage report for DS-02674261-44dd-42d3-be49-df07e98ebe8a from datanode DatanodeRegistration(127.0.0.1:45289, datanodeUuid=86cb6d34-33c4-4994-b4f5-094970d66c80, infoPort=43409, infoSecurePort=0, ipcPort=45331, storageInfo=lv=-57;cid=testClusterID;nsid=1798293373;c=1731671086590) 2024-11-15T11:44:49,589 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd5ca657784da03c1 with lease ID 0xe89f765bce9045b5: from storage DS-02674261-44dd-42d3-be49-df07e98ebe8a node DatanodeRegistration(127.0.0.1:45289, datanodeUuid=86cb6d34-33c4-4994-b4f5-094970d66c80, infoPort=43409, infoSecurePort=0, ipcPort=45331, storageInfo=lv=-57;cid=testClusterID;nsid=1798293373;c=1731671086590), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T11:44:49,589 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x78a4372098e5bbfe with lease ID 0xe89f765bce9045b4: Processing first storage report for DS-ad3de616-bb92-4682-85ad-cb85eb557f1b from datanode DatanodeRegistration(127.0.0.1:41851, datanodeUuid=30a7de63-de4a-4d6e-a559-3261a411aa0c, infoPort=44421, infoSecurePort=0, ipcPort=43811, storageInfo=lv=-57;cid=testClusterID;nsid=1798293373;c=1731671086590) 2024-11-15T11:44:49,589 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x78a4372098e5bbfe with lease ID 0xe89f765bce9045b4: from storage DS-ad3de616-bb92-4682-85ad-cb85eb557f1b node DatanodeRegistration(127.0.0.1:41851, datanodeUuid=30a7de63-de4a-4d6e-a559-3261a411aa0c, infoPort=44421, infoSecurePort=0, ipcPort=43811, storageInfo=lv=-57;cid=testClusterID;nsid=1798293373;c=1731671086590), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-15T11:44:49,622 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38013aa2-541c-af28-f77b-965bd7a3cc8a 2024-11-15T11:44:49,724 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38013aa2-541c-af28-f77b-965bd7a3cc8a/cluster_1240ca5f-19ad-af6e-66ec-b0f14fcc9bba/zookeeper_0, clientPort=52542, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38013aa2-541c-af28-f77b-965bd7a3cc8a/cluster_1240ca5f-19ad-af6e-66ec-b0f14fcc9bba/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38013aa2-541c-af28-f77b-965bd7a3cc8a/cluster_1240ca5f-19ad-af6e-66ec-b0f14fcc9bba/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-15T11:44:49,738 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=52542 2024-11-15T11:44:49,754 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:44:49,759 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:44:49,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45289 is added to blk_1073741825_1001 (size=7) 2024-11-15T11:44:49,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41851 is added to blk_1073741825_1001 (size=7) 2024-11-15T11:44:50,397 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae with version=8 2024-11-15T11:44:50,397 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/hbase-staging 2024-11-15T11:44:50,482 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-15T11:44:50,740 INFO [Time-limited test {}] client.ConnectionUtils(128): master/7adf9b3d9d04:0 server-side Connection retries=45 2024-11-15T11:44:50,750 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T11:44:50,750 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T11:44:50,757 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T11:44:50,757 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T11:44:50,757 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T11:44:50,939 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-15T11:44:51,022 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-15T11:44:51,035 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-15T11:44:51,040 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T11:44:51,073 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 18007 (auto-detected) 2024-11-15T11:44:51,074 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-15T11:44:51,097 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38813 2024-11-15T11:44:51,124 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:38813 connecting to ZooKeeper ensemble=127.0.0.1:52542 2024-11-15T11:44:51,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:388130x0, quorum=127.0.0.1:52542, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T11:44:51,258 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38813-0x1013f9974e80000 connected 2024-11-15T11:44:51,348 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:44:51,351 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:44:51,363 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38813-0x1013f9974e80000, quorum=127.0.0.1:52542, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T11:44:51,368 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae, hbase.cluster.distributed=false 2024-11-15T11:44:51,398 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38813-0x1013f9974e80000, quorum=127.0.0.1:52542, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T11:44:51,403 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38813 
2024-11-15T11:44:51,403 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38813 2024-11-15T11:44:51,414 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38813 2024-11-15T11:44:51,415 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38813 2024-11-15T11:44:51,416 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38813 2024-11-15T11:44:51,531 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7adf9b3d9d04:0 server-side Connection retries=45 2024-11-15T11:44:51,533 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T11:44:51,533 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T11:44:51,534 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T11:44:51,534 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T11:44:51,534 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T11:44:51,538 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-15T11:44:51,541 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T11:44:51,543 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40707 2024-11-15T11:44:51,546 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40707 connecting to ZooKeeper ensemble=127.0.0.1:52542 2024-11-15T11:44:51,547 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:44:51,554 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:44:51,580 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:407070x0, quorum=127.0.0.1:52542, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T11:44:51,581 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40707-0x1013f9974e80001 connected 2024-11-15T11:44:51,582 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40707-0x1013f9974e80001, quorum=127.0.0.1:52542, baseZNode=/hbase Set watcher on 
znode that does not yet exist, /hbase/running 2024-11-15T11:44:51,587 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-15T11:44:51,603 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-15T11:44:51,607 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40707-0x1013f9974e80001, quorum=127.0.0.1:52542, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-15T11:44:51,613 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40707-0x1013f9974e80001, quorum=127.0.0.1:52542, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T11:44:51,618 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40707 2024-11-15T11:44:51,619 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40707 2024-11-15T11:44:51,623 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40707 2024-11-15T11:44:51,627 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40707 2024-11-15T11:44:51,627 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40707 2024-11-15T11:44:51,644 DEBUG [M:0;7adf9b3d9d04:38813 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7adf9b3d9d04:38813 2024-11-15T11:44:51,647 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/7adf9b3d9d04,38813,1731671090565 2024-11-15T11:44:51,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40707-0x1013f9974e80001, quorum=127.0.0.1:52542, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T11:44:51,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38813-0x1013f9974e80000, quorum=127.0.0.1:52542, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T11:44:51,687 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38813-0x1013f9974e80000, quorum=127.0.0.1:52542, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/7adf9b3d9d04,38813,1731671090565 2024-11-15T11:44:51,713 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40707-0x1013f9974e80001, quorum=127.0.0.1:52542, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-15T11:44:51,713 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38813-0x1013f9974e80000, quorum=127.0.0.1:52542, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:44:51,713 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40707-0x1013f9974e80001, quorum=127.0.0.1:52542, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:44:51,714 DEBUG 
[master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38813-0x1013f9974e80000, quorum=127.0.0.1:52542, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-15T11:44:51,715 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7adf9b3d9d04,38813,1731671090565 from backup master directory 2024-11-15T11:44:51,726 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38813-0x1013f9974e80000, quorum=127.0.0.1:52542, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/7adf9b3d9d04,38813,1731671090565 2024-11-15T11:44:51,726 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40707-0x1013f9974e80001, quorum=127.0.0.1:52542, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T11:44:51,727 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38813-0x1013f9974e80000, quorum=127.0.0.1:52542, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T11:44:51,727 WARN [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-15T11:44:51,727 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7adf9b3d9d04,38813,1731671090565 2024-11-15T11:44:51,729 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-15T11:44:51,730 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-15T11:44:51,793 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/hbase.id] with ID: 5e246bdf-754b-40f7-8500-92df056c7dbe 2024-11-15T11:44:51,793 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/.tmp/hbase.id 2024-11-15T11:44:51,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41851 is added to blk_1073741826_1002 (size=42) 2024-11-15T11:44:51,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45289 is added to blk_1073741826_1002 (size=42) 2024-11-15T11:44:51,814 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/.tmp/hbase.id]:[hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/hbase.id] 2024-11-15T11:44:51,859 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:44:51,865 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching 
table descriptors from the filesystem. 2024-11-15T11:44:51,891 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 24ms. 2024-11-15T11:44:51,918 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38813-0x1013f9974e80000, quorum=127.0.0.1:52542, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:44:51,918 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40707-0x1013f9974e80001, quorum=127.0.0.1:52542, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:44:51,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41851 is added to blk_1073741827_1003 (size=196) 2024-11-15T11:44:51,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45289 is added to blk_1073741827_1003 (size=196) 2024-11-15T11:44:52,365 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T11:44:52,368 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-15T11:44:52,375 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T11:44:52,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45289 is added to blk_1073741828_1004 (size=1189) 2024-11-15T11:44:52,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41851 is added to blk_1073741828_1004 (size=1189) 2024-11-15T11:44:52,436 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', 
{TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/MasterData/data/master/store 2024-11-15T11:44:52,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45289 is added to blk_1073741829_1005 (size=34) 2024-11-15T11:44:52,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41851 is added to blk_1073741829_1005 (size=34) 2024-11-15T11:44:52,472 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-15T11:44:52,475 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:44:52,477 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T11:44:52,477 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:44:52,477 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:44:52,479 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T11:44:52,480 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:44:52,480 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-15T11:44:52,481 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731671092477Disabling compacts and flushes for region at 1731671092477Disabling writes for close at 1731671092479 (+2 ms)Writing region close event to WAL at 1731671092480 (+1 ms)Closed at 1731671092480 2024-11-15T11:44:52,484 WARN [master/7adf9b3d9d04:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/MasterData/data/master/store/.initializing 2024-11-15T11:44:52,485 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/MasterData/WALs/7adf9b3d9d04,38813,1731671090565 2024-11-15T11:44:52,508 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7adf9b3d9d04%2C38813%2C1731671090565, suffix=, logDir=hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/MasterData/WALs/7adf9b3d9d04,38813,1731671090565, archiveDir=hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/MasterData/oldWALs, maxLogs=10 2024-11-15T11:44:52,518 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7adf9b3d9d04%2C38813%2C1731671090565.1731671092513 2024-11-15T11:44:52,552 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/MasterData/WALs/7adf9b3d9d04,38813,1731671090565/7adf9b3d9d04%2C38813%2C1731671090565.1731671092513 2024-11-15T11:44:52,569 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44421:44421),(127.0.0.1/127.0.0.1:43409:43409)] 2024-11-15T11:44:52,576 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-15T11:44:52,577 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:44:52,581 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:44:52,582 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:44:52,631 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:44:52,660 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-15T11:44:52,665 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:44:52,669 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:44:52,670 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:44:52,674 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-15T11:44:52,674 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:44:52,675 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:44:52,675 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:44:52,679 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: 
max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-15T11:44:52,679 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:44:52,680 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:44:52,680 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:44:52,683 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-15T11:44:52,683 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:44:52,685 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:44:52,685 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:44:52,689 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:44:52,691 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:44:52,697 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:44:52,698 DEBUG 
[master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:44:52,703 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-15T11:44:52,708 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:44:52,713 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T11:44:52,715 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=783545, jitterRate=-0.003671109676361084}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-15T11:44:52,723 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731671092599Initializing all the Stores at 1731671092601 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731671092602 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731671092603 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731671092603Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731671092603Cleaning up temporary data from old regions at 1731671092698 (+95 ms)Region opened successfully at 1731671092723 (+25 ms) 2024-11-15T11:44:52,727 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-15T11:44:52,764 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36161a69, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7adf9b3d9d04/172.17.0.2:0 2024-11-15T11:44:52,796 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-15T11:44:52,811 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-15T11:44:52,811 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-15T11:44:52,814 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-15T11:44:52,816 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-15T11:44:52,822 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 5 msec 2024-11-15T11:44:52,822 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-15T11:44:52,854 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-15T11:44:52,868 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38813-0x1013f9974e80000, quorum=127.0.0.1:52542, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-15T11:44:52,921 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-15T11:44:52,924 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-15T11:44:52,925 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38813-0x1013f9974e80000, quorum=127.0.0.1:52542, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-15T11:44:52,934 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-15T11:44:52,937 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-15T11:44:52,940 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38813-0x1013f9974e80000, quorum=127.0.0.1:52542, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-15T11:44:52,951 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-15T11:44:52,956 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38813-0x1013f9974e80000, quorum=127.0.0.1:52542, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily 
an error) 2024-11-15T11:44:52,997 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-15T11:44:53,017 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38813-0x1013f9974e80000, quorum=127.0.0.1:52542, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-15T11:44:53,139 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-15T11:44:53,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40707-0x1013f9974e80001, quorum=127.0.0.1:52542, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T11:44:53,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38813-0x1013f9974e80000, quorum=127.0.0.1:52542, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T11:44:53,160 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40707-0x1013f9974e80001, quorum=127.0.0.1:52542, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:44:53,160 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38813-0x1013f9974e80000, quorum=127.0.0.1:52542, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:44:53,162 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=7adf9b3d9d04,38813,1731671090565, sessionid=0x1013f9974e80000, setting cluster-up flag (Was=false) 2024-11-15T11:44:53,187 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38813-0x1013f9974e80000, quorum=127.0.0.1:52542, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:44:53,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40707-0x1013f9974e80001, quorum=127.0.0.1:52542, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:44:53,212 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-15T11:44:53,214 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7adf9b3d9d04,38813,1731671090565 2024-11-15T11:44:53,237 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40707-0x1013f9974e80001, quorum=127.0.0.1:52542, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:44:53,237 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38813-0x1013f9974e80000, quorum=127.0.0.1:52542, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:44:53,271 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-15T11:44:53,273 DEBUG 
[master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7adf9b3d9d04,38813,1731671090565 2024-11-15T11:44:53,280 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-15T11:44:53,333 INFO [RS:0;7adf9b3d9d04:40707 {}] regionserver.HRegionServer(746): ClusterId : 5e246bdf-754b-40f7-8500-92df056c7dbe 2024-11-15T11:44:53,335 DEBUG [RS:0;7adf9b3d9d04:40707 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-15T11:44:53,356 DEBUG [RS:0;7adf9b3d9d04:40707 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-15T11:44:53,357 DEBUG [RS:0;7adf9b3d9d04:40707 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-15T11:44:53,361 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-15T11:44:53,369 DEBUG [RS:0;7adf9b3d9d04:40707 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-15T11:44:53,370 DEBUG [RS:0;7adf9b3d9d04:40707 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@20256adb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7adf9b3d9d04/172.17.0.2:0 2024-11-15T11:44:53,370 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-15T11:44:53,376 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-15T11:44:53,383 DEBUG [RS:0;7adf9b3d9d04:40707 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;7adf9b3d9d04:40707 2024-11-15T11:44:53,382 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7adf9b3d9d04,38813,1731671090565 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-15T11:44:53,386 INFO [RS:0;7adf9b3d9d04:40707 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-15T11:44:53,386 INFO [RS:0;7adf9b3d9d04:40707 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-15T11:44:53,387 DEBUG [RS:0;7adf9b3d9d04:40707 {}] regionserver.HRegionServer(832): About to register with Master. 
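The ZKWatcher events above track the active master creating /hbase/running under baseZNode=/hbase once it sets the cluster-up flag. A small sketch, assuming the plain ZooKeeper client, of how an outside process could observe that same znode; the quorum address is the throwaway mini-cluster port from this log, so it is illustrative only:

```java
import org.apache.zookeeper.ZooKeeper;

public class ClusterUpCheck {
    public static void main(String[] args) throws Exception {
        // Quorum and znode path are taken from the log above; substitute a
        // real ensemble address outside this test environment.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:52542", 30_000,
                event -> System.out.println("ZK event: " + event.getType()
                        + " state=" + event.getState()
                        + " path=" + event.getPath()));
        // The active master creates /hbase/running when the cluster comes up.
        boolean clusterUp = zk.exists("/hbase/running", true) != null;
        System.out.println("/hbase/running present: " + clusterUp);
        zk.close();
    }
}
```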
2024-11-15T11:44:53,388 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7adf9b3d9d04:0, corePoolSize=5, maxPoolSize=5 2024-11-15T11:44:53,389 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7adf9b3d9d04:0, corePoolSize=5, maxPoolSize=5 2024-11-15T11:44:53,389 INFO [RS:0;7adf9b3d9d04:40707 {}] regionserver.HRegionServer(2659): reportForDuty to master=7adf9b3d9d04,38813,1731671090565 with port=40707, startcode=1731671091491 2024-11-15T11:44:53,389 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7adf9b3d9d04:0, corePoolSize=5, maxPoolSize=5 2024-11-15T11:44:53,389 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7adf9b3d9d04:0, corePoolSize=5, maxPoolSize=5 2024-11-15T11:44:53,389 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7adf9b3d9d04:0, corePoolSize=10, maxPoolSize=10 2024-11-15T11:44:53,389 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:44:53,389 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/7adf9b3d9d04:0, corePoolSize=2, maxPoolSize=2 2024-11-15T11:44:53,389 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:44:53,391 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731671123391 2024-11-15T11:44:53,393 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-15T11:44:53,394 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-15T11:44:53,395 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T11:44:53,396 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-15T11:44:53,398 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-15T11:44:53,398 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-15T11:44:53,399 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-15T11:44:53,399 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-15T11:44:53,402 DEBUG [RS:0;7adf9b3d9d04:40707 {}] 
ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-15T11:44:53,400 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-15T11:44:53,411 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-15T11:44:53,411 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:44:53,412 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-15T11:44:53,412 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-15T11:44:53,412 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-15T11:44:53,415 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-15T11:44:53,415 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-15T11:44:53,418 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7adf9b3d9d04:0:becomeActiveMaster-HFileCleaner.large.0-1731671093417,5,FailOnTimeoutGroup] 2024-11-15T11:44:53,421 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/7adf9b3d9d04:0:becomeActiveMaster-HFileCleaner.small.0-1731671093419,5,FailOnTimeoutGroup] 2024-11-15T11:44:53,421 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] 
hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-15T11:44:53,422 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-15T11:44:53,423 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-15T11:44:53,423 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-15T11:44:53,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45289 is added to blk_1073741831_1007 (size=1321) 2024-11-15T11:44:53,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41851 is added to blk_1073741831_1007 (size=1321) 2024-11-15T11:44:53,435 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-15T11:44:53,436 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae 2024-11-15T11:44:53,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45289 is added to blk_1073741832_1008 (size=32) 2024-11-15T11:44:53,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41851 is added to blk_1073741832_1008 (size=32) 2024-11-15T11:44:53,465 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:44:53,468 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T11:44:53,471 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T11:44:53,472 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:44:53,473 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:44:53,473 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T11:44:53,476 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T11:44:53,477 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:44:53,478 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:44:53,478 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T11:44:53,482 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T11:44:53,482 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:44:53,483 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35479, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-15T11:44:53,484 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:44:53,484 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T11:44:53,486 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T11:44:53,486 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:44:53,487 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:44:53,488 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T11:44:53,489 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/hbase/meta/1588230740 2024-11-15T11:44:53,489 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38813 {}] master.ServerManager(363): 
Checking decommissioned status of RegionServer 7adf9b3d9d04,40707,1731671091491 2024-11-15T11:44:53,490 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/hbase/meta/1588230740 2024-11-15T11:44:53,492 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T11:44:53,492 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T11:44:53,492 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38813 {}] master.ServerManager(517): Registering regionserver=7adf9b3d9d04,40707,1731671091491 2024-11-15T11:44:53,493 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-15T11:44:53,495 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T11:44:53,500 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T11:44:53,511 DEBUG [RS:0;7adf9b3d9d04:40707 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae 2024-11-15T11:44:53,511 DEBUG [RS:0;7adf9b3d9d04:40707 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37621 2024-11-15T11:44:53,511 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=740262, jitterRate=-0.05870944261550903}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T11:44:53,511 DEBUG [RS:0;7adf9b3d9d04:40707 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-15T11:44:53,517 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731671093465Initializing all the Stores at 1731671093467 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731671093467Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731671093468 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731671093468Instantiating store for column family {NAME => 
'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731671093468Cleaning up temporary data from old regions at 1731671093492 (+24 ms)Region opened successfully at 1731671093516 (+24 ms) 2024-11-15T11:44:53,517 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T11:44:53,517 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T11:44:53,517 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T11:44:53,518 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T11:44:53,518 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T11:44:53,521 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T11:44:53,521 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731671093517Disabling compacts and flushes for region at 1731671093517Disabling writes for close at 1731671093518 (+1 ms)Writing region close event to WAL at 1731671093521 (+3 ms)Closed at 1731671093521 2024-11-15T11:44:53,525 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T11:44:53,525 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-15T11:44:53,532 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-15T11:44:53,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38813-0x1013f9974e80000, quorum=127.0.0.1:52542, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T11:44:53,538 DEBUG [RS:0;7adf9b3d9d04:40707 {}] zookeeper.ZKUtil(111): regionserver:40707-0x1013f9974e80001, quorum=127.0.0.1:52542, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7adf9b3d9d04,40707,1731671091491 2024-11-15T11:44:53,538 WARN [RS:0;7adf9b3d9d04:40707 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
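The FlushLargeStoresPolicy lines are easy to misread: with hbase.hregion.percolumnfamilyflush.size.lower.bound unset on the table descriptor, the lower bound falls back to the region's memstore flush heap size divided by the number of column families, which is where the 32.0 M (master:store) and 16.0 M (hbase:meta) figures above come from, both tables having four families. A small sketch of that fallback arithmetic using the byte values from the log; the 64 MiB flush heap size for hbase:meta is implied by its logged lower bound of 16777216 rather than stated directly:

```java
public class FlushLowerBoundFallback {
    // Fallback when hbase.hregion.percolumnfamilyflush.size.lower.bound is not
    // set on the descriptor: memstore flush heap size / number of families.
    static long fallbackLowerBound(long memStoreFlushHeapSize, int numFamilies) {
        return memStoreFlushHeapSize / numFamilies;
    }

    public static void main(String[] args) {
        // master:store: flushSize=134217728 (128 MiB), 4 families
        // (info, proc, rs, state) -> 33554432, the flushSizeLowerBound logged above.
        System.out.println(fallbackLowerBound(134217728L, 4));
        // hbase:meta: 4 families (info, ns, rep_barrier, table); the logged
        // 16777216 (16 MiB) lower bound implies a 64 MiB flush heap size.
        System.out.println(fallbackLowerBound(67108864L, 4));
    }
}
```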
2024-11-15T11:44:53,538 INFO [RS:0;7adf9b3d9d04:40707 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T11:44:53,539 DEBUG [RS:0;7adf9b3d9d04:40707 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/WALs/7adf9b3d9d04,40707,1731671091491 2024-11-15T11:44:53,540 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T11:44:53,541 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7adf9b3d9d04,40707,1731671091491] 2024-11-15T11:44:53,542 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-15T11:44:53,566 INFO [RS:0;7adf9b3d9d04:40707 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-15T11:44:53,585 INFO [RS:0;7adf9b3d9d04:40707 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-15T11:44:53,595 INFO [RS:0;7adf9b3d9d04:40707 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-15T11:44:53,595 INFO [RS:0;7adf9b3d9d04:40707 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T11:44:53,597 INFO [RS:0;7adf9b3d9d04:40707 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-15T11:44:53,604 INFO [RS:0;7adf9b3d9d04:40707 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-15T11:44:53,606 INFO [RS:0;7adf9b3d9d04:40707 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
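The CompactionConfiguration dump repeated for every store above (ratio 1.200000, minFilesToCompact:3, maxFilesToCompact:10) parameterizes ratio-based file selection for minor compactions. The following is only a simplified sketch of the ratio test those numbers feed, not the ExploringCompactionPolicy named in the log, which additionally scores whole candidate sets: a file is skipped while its size exceeds ratio times the combined size of the files that come after it.

```java
import java.util.List;

public class RatioSelectionSketch {
    // Simplified illustration of the ratio rule; files ordered oldest-first,
    // sizes in bytes. Not HBase's actual compaction-policy implementation.
    static int firstSelectable(List<Long> sizes, double ratio) {
        int start = 0;
        while (start < sizes.size()) {
            long sumAfter = 0;
            for (int i = start + 1; i < sizes.size(); i++) {
                sumAfter += sizes.get(i);
            }
            // Keep skipping files that are "too big" relative to what follows.
            if (sizes.get(start) <= ratio * sumAfter) {
                break;
            }
            start++;
        }
        return start;
    }

    public static void main(String[] args) {
        // A 1 GiB file next to three ~60 MB files: the big one is skipped and
        // the small ones form the candidate set (>= minFilesToCompact of 3).
        List<Long> sizes = List.of(1_073_741_824L, 64_000_000L, 61_000_000L, 59_000_000L);
        System.out.println("selection starts at index " + firstSelectable(sizes, 1.2));
    }
}
```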
2024-11-15T11:44:53,606 DEBUG [RS:0;7adf9b3d9d04:40707 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:44:53,606 DEBUG [RS:0;7adf9b3d9d04:40707 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:44:53,607 DEBUG [RS:0;7adf9b3d9d04:40707 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:44:53,607 DEBUG [RS:0;7adf9b3d9d04:40707 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:44:53,607 DEBUG [RS:0;7adf9b3d9d04:40707 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:44:53,607 DEBUG [RS:0;7adf9b3d9d04:40707 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7adf9b3d9d04:0, corePoolSize=2, maxPoolSize=2 2024-11-15T11:44:53,607 DEBUG [RS:0;7adf9b3d9d04:40707 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:44:53,607 DEBUG [RS:0;7adf9b3d9d04:40707 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:44:53,608 DEBUG [RS:0;7adf9b3d9d04:40707 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:44:53,608 DEBUG [RS:0;7adf9b3d9d04:40707 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:44:53,608 DEBUG [RS:0;7adf9b3d9d04:40707 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:44:53,608 DEBUG [RS:0;7adf9b3d9d04:40707 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:44:53,608 DEBUG [RS:0;7adf9b3d9d04:40707 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7adf9b3d9d04:0, corePoolSize=3, maxPoolSize=3 2024-11-15T11:44:53,608 DEBUG [RS:0;7adf9b3d9d04:40707 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7adf9b3d9d04:0, corePoolSize=3, maxPoolSize=3 2024-11-15T11:44:53,609 INFO [RS:0;7adf9b3d9d04:40707 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T11:44:53,609 INFO [RS:0;7adf9b3d9d04:40707 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T11:44:53,610 INFO [RS:0;7adf9b3d9d04:40707 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T11:44:53,610 INFO [RS:0;7adf9b3d9d04:40707 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-15T11:44:53,610 INFO [RS:0;7adf9b3d9d04:40707 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-15T11:44:53,610 INFO [RS:0;7adf9b3d9d04:40707 {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,40707,1731671091491-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T11:44:53,632 INFO [RS:0;7adf9b3d9d04:40707 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-15T11:44:53,634 INFO [RS:0;7adf9b3d9d04:40707 {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,40707,1731671091491-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T11:44:53,635 INFO [RS:0;7adf9b3d9d04:40707 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T11:44:53,635 INFO [RS:0;7adf9b3d9d04:40707 {}] regionserver.Replication(171): 7adf9b3d9d04,40707,1731671091491 started 2024-11-15T11:44:53,654 INFO [RS:0;7adf9b3d9d04:40707 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T11:44:53,655 INFO [RS:0;7adf9b3d9d04:40707 {}] regionserver.HRegionServer(1482): Serving as 7adf9b3d9d04,40707,1731671091491, RpcServer on 7adf9b3d9d04/172.17.0.2:40707, sessionid=0x1013f9974e80001 2024-11-15T11:44:53,656 DEBUG [RS:0;7adf9b3d9d04:40707 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-15T11:44:53,656 DEBUG [RS:0;7adf9b3d9d04:40707 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7adf9b3d9d04,40707,1731671091491 2024-11-15T11:44:53,656 DEBUG [RS:0;7adf9b3d9d04:40707 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7adf9b3d9d04,40707,1731671091491' 2024-11-15T11:44:53,656 DEBUG [RS:0;7adf9b3d9d04:40707 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-15T11:44:53,657 DEBUG [RS:0;7adf9b3d9d04:40707 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-15T11:44:53,658 DEBUG [RS:0;7adf9b3d9d04:40707 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-15T11:44:53,658 DEBUG [RS:0;7adf9b3d9d04:40707 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-15T11:44:53,658 DEBUG [RS:0;7adf9b3d9d04:40707 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7adf9b3d9d04,40707,1731671091491 2024-11-15T11:44:53,658 DEBUG [RS:0;7adf9b3d9d04:40707 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7adf9b3d9d04,40707,1731671091491' 2024-11-15T11:44:53,658 DEBUG [RS:0;7adf9b3d9d04:40707 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-15T11:44:53,659 DEBUG [RS:0;7adf9b3d9d04:40707 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-15T11:44:53,660 DEBUG [RS:0;7adf9b3d9d04:40707 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-15T11:44:53,660 INFO [RS:0;7adf9b3d9d04:40707 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-15T11:44:53,660 INFO [RS:0;7adf9b3d9d04:40707 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-15T11:44:53,694 WARN [7adf9b3d9d04:38813 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-15T11:44:53,768 INFO [RS:0;7adf9b3d9d04:40707 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7adf9b3d9d04%2C40707%2C1731671091491, suffix=, logDir=hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/WALs/7adf9b3d9d04,40707,1731671091491, archiveDir=hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/oldWALs, maxLogs=32 2024-11-15T11:44:53,770 INFO [RS:0;7adf9b3d9d04:40707 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7adf9b3d9d04%2C40707%2C1731671091491.1731671093770 2024-11-15T11:44:53,788 INFO [RS:0;7adf9b3d9d04:40707 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/WALs/7adf9b3d9d04,40707,1731671091491/7adf9b3d9d04%2C40707%2C1731671091491.1731671093770 2024-11-15T11:44:53,790 DEBUG [RS:0;7adf9b3d9d04:40707 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43409:43409),(127.0.0.1/127.0.0.1:44421:44421)] 2024-11-15T11:44:53,946 DEBUG [7adf9b3d9d04:38813 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-15T11:44:53,958 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7adf9b3d9d04,40707,1731671091491 2024-11-15T11:44:53,965 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7adf9b3d9d04,40707,1731671091491, state=OPENING 2024-11-15T11:44:54,004 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-15T11:44:54,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40707-0x1013f9974e80001, quorum=127.0.0.1:52542, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:44:54,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38813-0x1013f9974e80000, quorum=127.0.0.1:52542, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:44:54,014 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T11:44:54,014 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T11:44:54,015 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T11:44:54,018 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=7adf9b3d9d04,40707,1731671091491}] 2024-11-15T11:44:54,198 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-15T11:44:54,201 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49915, 
version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-15T11:44:54,215 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-15T11:44:54,215 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T11:44:54,219 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7adf9b3d9d04%2C40707%2C1731671091491.meta, suffix=.meta, logDir=hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/WALs/7adf9b3d9d04,40707,1731671091491, archiveDir=hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/oldWALs, maxLogs=32 2024-11-15T11:44:54,222 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 7adf9b3d9d04%2C40707%2C1731671091491.meta.1731671094222.meta 2024-11-15T11:44:54,249 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/WALs/7adf9b3d9d04,40707,1731671091491/7adf9b3d9d04%2C40707%2C1731671091491.meta.1731671094222.meta 2024-11-15T11:44:54,255 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43409:43409),(127.0.0.1/127.0.0.1:44421:44421)] 2024-11-15T11:44:54,263 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-15T11:44:54,265 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-15T11:44:54,268 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-15T11:44:54,274 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
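A brief, illustrative aside on the WAL provider seen above: wal.WALFactory instantiates org.apache.hadoop.hbase.wal.FSHLogProvider for hbase:meta, and that choice is normally made through configuration before the cluster (or mini cluster) starts. The sketch below is an assumption-laden illustration, not code from this test; the property names are the standard hbase-default keys as I understand them, so verify them against your HBase version.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalProviderConfigSketch {
        public static void main(String[] args) {
            // Start from the default HBase configuration (hbase-site.xml on the classpath, if any).
            Configuration conf = HBaseConfiguration.create();
            // "filesystem" maps to the FSHLog-based provider appearing in the log above;
            // "asyncfs" would select the asynchronous AsyncFSWAL provider instead.
            conf.set("hbase.wal.provider", "filesystem");
            // Assumed key: the meta table can be given its own provider separately.
            conf.set("hbase.wal.meta_provider", "filesystem");
            System.out.println("WAL provider = " + conf.get("hbase.wal.provider"));
        }
    }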
2024-11-15T11:44:54,280 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-15T11:44:54,281 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:44:54,281 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-15T11:44:54,282 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-15T11:44:54,287 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T11:44:54,289 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T11:44:54,290 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:44:54,291 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:44:54,292 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T11:44:54,294 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T11:44:54,295 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:44:54,296 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:44:54,297 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T11:44:54,299 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T11:44:54,299 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:44:54,300 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:44:54,301 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T11:44:54,303 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T11:44:54,303 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:44:54,305 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
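The CompactionConfiguration lines repeated above print the effective compaction settings for each column family of hbase:meta (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, and so on). As a hedged sketch only, these numbers normally come from the standard compaction keys shown below; the defaults passed as second arguments mirror the values printed in the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // These keys correspond to the minFilesToCompact / maxFilesToCompact / ratio
            // figures logged by CompactionConfiguration above.
            int minFiles = conf.getInt("hbase.hstore.compaction.min", 3);
            int maxFiles = conf.getInt("hbase.hstore.compaction.max", 10);
            float ratio = conf.getFloat("hbase.hstore.compaction.ratio", 1.2F);
            System.out.printf("minFilesToCompact=%d, maxFilesToCompact=%d, ratio=%.1f%n",
                minFiles, maxFiles, ratio);
        }
    }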
2024-11-15T11:44:54,305 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T11:44:54,307 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/hbase/meta/1588230740 2024-11-15T11:44:54,311 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/hbase/meta/1588230740 2024-11-15T11:44:54,314 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T11:44:54,315 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T11:44:54,316 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-15T11:44:54,320 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T11:44:54,322 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=831998, jitterRate=0.05794055759906769}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T11:44:54,322 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-15T11:44:54,324 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731671094282Writing region info on filesystem at 1731671094282Initializing all the Stores at 1731671094284 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731671094285 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731671094286 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 
'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731671094286Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731671094287 (+1 ms)Cleaning up temporary data from old regions at 1731671094315 (+28 ms)Running coprocessor post-open hooks at 1731671094322 (+7 ms)Region opened successfully at 1731671094324 (+2 ms) 2024-11-15T11:44:54,332 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731671094187 2024-11-15T11:44:54,346 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-15T11:44:54,347 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-15T11:44:54,349 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=7adf9b3d9d04,40707,1731671091491 2024-11-15T11:44:54,352 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7adf9b3d9d04,40707,1731671091491, state=OPEN 2024-11-15T11:44:54,434 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40707-0x1013f9974e80001, quorum=127.0.0.1:52542, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T11:44:54,434 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38813-0x1013f9974e80000, quorum=127.0.0.1:52542, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T11:44:54,435 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T11:44:54,436 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=7adf9b3d9d04,40707,1731671091491 2024-11-15T11:44:54,441 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T11:44:54,445 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-15T11:44:54,445 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=7adf9b3d9d04,40707,1731671091491 in 418 msec 2024-11-15T11:44:54,454 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-15T11:44:54,455 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 916 msec 2024-11-15T11:44:54,456 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T11:44:54,457 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-15T11:44:54,479 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:44:54,481 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7adf9b3d9d04,40707,1731671091491, seqNum=-1] 2024-11-15T11:44:54,501 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:44:54,503 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54881, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T11:44:54,530 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.2150 sec 2024-11-15T11:44:54,530 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731671094530, completionTime=-1 2024-11-15T11:44:54,533 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-15T11:44:54,534 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-15T11:44:54,568 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-15T11:44:54,568 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731671154568 2024-11-15T11:44:54,568 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731671214568 2024-11-15T11:44:54,568 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 34 msec 2024-11-15T11:44:54,573 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,38813,1731671090565-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T11:44:54,573 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,38813,1731671090565-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T11:44:54,574 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,38813,1731671090565-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T11:44:54,576 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7adf9b3d9d04:38813, period=300000, unit=MILLISECONDS is enabled. 
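InitMetaProcedure above creates the 'default' and 'hbase' namespaces as part of meta initialization. A minimal sketch, assuming a client connection to this same cluster (connection details come from the test configuration and are not shown here), of confirming both namespaces exist:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ListNamespacesSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Once InitMetaProcedure finishes, this should print "default" and "hbase".
                for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
                    System.out.println(ns.getName());
                }
            }
        }
    }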
2024-11-15T11:44:54,577 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-15T11:44:54,577 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-15T11:44:54,584 DEBUG [master/7adf9b3d9d04:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-15T11:44:54,614 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.886sec 2024-11-15T11:44:54,616 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-15T11:44:54,618 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-15T11:44:54,619 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-15T11:44:54,620 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-15T11:44:54,621 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-15T11:44:54,622 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,38813,1731671090565-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T11:44:54,623 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,38813,1731671090565-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-15T11:44:54,648 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-15T11:44:54,649 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-15T11:44:54,650 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,38813,1731671090565-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
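At this point the master reports "Master has completed initialization" and a single region server has checked in. A hedged sketch (standard Admin API, not taken from the test) of verifying that state from a client; against this cluster it would report the master 7adf9b3d9d04,38813 and the one region server on port 40707.

    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClusterStatusSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                ClusterMetrics metrics = admin.getClusterMetrics();
                System.out.println("active master: " + metrics.getMasterName());
                for (ServerName rs : metrics.getLiveServerMetrics().keySet()) {
                    System.out.println("live region server: " + rs);
                }
            }
        }
    }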
2024-11-15T11:44:54,658 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66cb686a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:44:54,660 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-15T11:44:54,661 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-15T11:44:54,664 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 7adf9b3d9d04,38813,-1 for getting cluster id 2024-11-15T11:44:54,668 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T11:44:54,679 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5e246bdf-754b-40f7-8500-92df056c7dbe' 2024-11-15T11:44:54,683 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T11:44:54,683 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5e246bdf-754b-40f7-8500-92df056c7dbe" 2024-11-15T11:44:54,686 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@29842d21, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:44:54,686 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [7adf9b3d9d04,38813,-1] 2024-11-15T11:44:54,689 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T11:44:54,691 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:44:54,693 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52326, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T11:44:54,697 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32fb6022, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:44:54,697 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:44:54,705 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7adf9b3d9d04,40707,1731671091491, seqNum=-1] 2024-11-15T11:44:54,706 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:44:54,709 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35732, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T11:44:54,728 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=7adf9b3d9d04,38813,1731671090565 2024-11-15T11:44:54,728 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:44:54,734 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-15T11:44:54,738 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-15T11:44:54,742 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 7adf9b3d9d04,38813,1731671090565 2024-11-15T11:44:54,744 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2bfd4db8 2024-11-15T11:44:54,745 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-15T11:44:54,747 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52328, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-15T11:44:54,749 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38813 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-15T11:44:54,749 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38813 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
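The two TableDescriptorChecker warnings above stem from deliberately tiny test settings: hbase.hregion.max.filesize at 786432 bytes and hbase.hregion.memstore.flush.size at 8192 bytes, far below the production defaults (10 GB and 128 MB respectively), so that flushes and rolls happen quickly. A minimal sketch of setting such values on the configuration; whether this particular test applies them via the Configuration or the table descriptor is not visible in the log, so treat this as an assumption.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class TinyRegionSettingsSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Values taken from the warnings above; both exist only to force frequent
            // flushes and region splits during the test.
            conf.setLong("hbase.hregion.max.filesize", 786432L);
            conf.setLong("hbase.hregion.memstore.flush.size", 8192L);
        }
    }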
2024-11-15T11:44:54,752 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38813 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T11:44:54,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38813 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-15T11:44:54,763 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-15T11:44:54,765 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38813 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-15T11:44:54,765 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:44:54,768 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-15T11:44:54,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38813 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-15T11:44:54,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41851 is added to blk_1073741835_1011 (size=389) 2024-11-15T11:44:54,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45289 is added to blk_1073741835_1011 (size=389) 2024-11-15T11:44:54,814 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 269f5330e3b0cfe416a1a385cd7fad73, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731671094749.269f5330e3b0cfe416a1a385cd7fad73.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae 2024-11-15T11:44:54,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41851 is added to blk_1073741836_1012 (size=72) 2024-11-15T11:44:54,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45289 is added to blk_1073741836_1012 (size=72) 2024-11-15T11:44:54,826 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731671094749.269f5330e3b0cfe416a1a385cd7fad73.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:44:54,826 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 269f5330e3b0cfe416a1a385cd7fad73, disabling compactions & flushes 2024-11-15T11:44:54,826 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731671094749.269f5330e3b0cfe416a1a385cd7fad73. 2024-11-15T11:44:54,826 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731671094749.269f5330e3b0cfe416a1a385cd7fad73. 2024-11-15T11:44:54,826 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731671094749.269f5330e3b0cfe416a1a385cd7fad73. after waiting 0 ms 2024-11-15T11:44:54,827 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731671094749.269f5330e3b0cfe416a1a385cd7fad73. 2024-11-15T11:44:54,827 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731671094749.269f5330e3b0cfe416a1a385cd7fad73. 2024-11-15T11:44:54,827 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 269f5330e3b0cfe416a1a385cd7fad73: Waiting for close lock at 1731671094826Disabling compacts and flushes for region at 1731671094826Disabling writes for close at 1731671094826Writing region close event to WAL at 1731671094827 (+1 ms)Closed at 1731671094827 2024-11-15T11:44:54,829 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-15T11:44:54,833 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1731671094749.269f5330e3b0cfe416a1a385cd7fad73.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1731671094829"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731671094829"}]},"ts":"1731671094829"} 2024-11-15T11:44:54,838 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
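The CreateTableProcedure (pid=4) above was triggered by a client request to create 'TestLogRolling-testSlowSyncLogRolling' with a single 'info' family and VERSIONS => '1'. A hedged sketch of the equivalent call through the public Admin API; the connection setup is assumed and the builder defaults cover the remaining descriptor attributes shown in the log.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
        public static void main(String[] args) throws Exception {
            TableName name = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
            // Family 'info' with at most one version, matching the descriptor logged above.
            TableDescriptor td = TableDescriptorBuilder.newBuilder(name)
                .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                    .setMaxVersions(1)
                    .build())
                .build();
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                admin.createTable(td); // corresponds to the pid=4 CreateTableProcedure above
            }
        }
    }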
2024-11-15T11:44:54,840 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-15T11:44:54,856 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731671094854"}]},"ts":"1731671094854"} 2024-11-15T11:44:54,867 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-15T11:44:54,869 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=269f5330e3b0cfe416a1a385cd7fad73, ASSIGN}] 2024-11-15T11:44:54,872 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=269f5330e3b0cfe416a1a385cd7fad73, ASSIGN 2024-11-15T11:44:54,874 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=269f5330e3b0cfe416a1a385cd7fad73, ASSIGN; state=OFFLINE, location=7adf9b3d9d04,40707,1731671091491; forceNewPlan=false, retain=false 2024-11-15T11:44:55,027 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=269f5330e3b0cfe416a1a385cd7fad73, regionState=OPENING, regionLocation=7adf9b3d9d04,40707,1731671091491 2024-11-15T11:44:55,037 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=269f5330e3b0cfe416a1a385cd7fad73, ASSIGN because future has completed 2024-11-15T11:44:55,039 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 269f5330e3b0cfe416a1a385cd7fad73, server=7adf9b3d9d04,40707,1731671091491}] 2024-11-15T11:44:55,206 INFO [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1731671094749.269f5330e3b0cfe416a1a385cd7fad73. 
2024-11-15T11:44:55,207 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 269f5330e3b0cfe416a1a385cd7fad73, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731671094749.269f5330e3b0cfe416a1a385cd7fad73.', STARTKEY => '', ENDKEY => ''} 2024-11-15T11:44:55,207 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 269f5330e3b0cfe416a1a385cd7fad73 2024-11-15T11:44:55,208 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731671094749.269f5330e3b0cfe416a1a385cd7fad73.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:44:55,208 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 269f5330e3b0cfe416a1a385cd7fad73 2024-11-15T11:44:55,208 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 269f5330e3b0cfe416a1a385cd7fad73 2024-11-15T11:44:55,212 INFO [StoreOpener-269f5330e3b0cfe416a1a385cd7fad73-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 269f5330e3b0cfe416a1a385cd7fad73 2024-11-15T11:44:55,216 INFO [StoreOpener-269f5330e3b0cfe416a1a385cd7fad73-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 269f5330e3b0cfe416a1a385cd7fad73 columnFamilyName info 2024-11-15T11:44:55,217 DEBUG [StoreOpener-269f5330e3b0cfe416a1a385cd7fad73-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:44:55,218 INFO [StoreOpener-269f5330e3b0cfe416a1a385cd7fad73-1 {}] regionserver.HStore(327): Store=269f5330e3b0cfe416a1a385cd7fad73/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:44:55,219 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 269f5330e3b0cfe416a1a385cd7fad73 2024-11-15T11:44:55,221 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/default/TestLogRolling-testSlowSyncLogRolling/269f5330e3b0cfe416a1a385cd7fad73 2024-11-15T11:44:55,222 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/default/TestLogRolling-testSlowSyncLogRolling/269f5330e3b0cfe416a1a385cd7fad73 2024-11-15T11:44:55,223 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 269f5330e3b0cfe416a1a385cd7fad73 2024-11-15T11:44:55,223 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 269f5330e3b0cfe416a1a385cd7fad73 2024-11-15T11:44:55,226 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 269f5330e3b0cfe416a1a385cd7fad73 2024-11-15T11:44:55,231 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/default/TestLogRolling-testSlowSyncLogRolling/269f5330e3b0cfe416a1a385cd7fad73/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T11:44:55,232 INFO [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 269f5330e3b0cfe416a1a385cd7fad73; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=696741, jitterRate=-0.11404924094676971}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T11:44:55,232 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 269f5330e3b0cfe416a1a385cd7fad73 2024-11-15T11:44:55,233 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 269f5330e3b0cfe416a1a385cd7fad73: Running coprocessor pre-open hook at 1731671095208Writing region info on filesystem at 1731671095208Initializing all the Stores at 1731671095211 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731671095211Cleaning up temporary data from old regions at 1731671095223 (+12 ms)Running coprocessor post-open hooks at 1731671095232 (+9 ms)Region opened successfully at 1731671095233 (+1 ms) 2024-11-15T11:44:55,235 INFO [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1731671094749.269f5330e3b0cfe416a1a385cd7fad73., pid=6, masterSystemTime=1731671095196 2024-11-15T11:44:55,240 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=269f5330e3b0cfe416a1a385cd7fad73, regionState=OPEN, openSeqNum=2, regionLocation=7adf9b3d9d04,40707,1731671091491 
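Once OpenRegionProcedure pid=6 completes, region 269f5330e3b0cfe416a1a385cd7fad73 is OPEN on 7adf9b3d9d04,40707 with openSeqNum=2, which matches the location the client-side locator reports further down ("...seqNum=2"). A small illustrative sketch, assuming a connection to the same cluster, of looking that location up from a client:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class LocateRegionSketch {
        public static void main(String[] args) throws Exception {
            TableName name = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 RegionLocator locator = conn.getRegionLocator(name)) {
                // The table has a single region, so any row key resolves to it.
                HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("row0001"));
                System.out.println(loc.getRegion().getEncodedName() + " on " + loc.getServerName());
            }
        }
    }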
2024-11-15T11:44:55,240 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testSlowSyncLogRolling,,1731671094749.269f5330e3b0cfe416a1a385cd7fad73. 2024-11-15T11:44:55,240 INFO [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1731671094749.269f5330e3b0cfe416a1a385cd7fad73. 2024-11-15T11:44:55,244 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 269f5330e3b0cfe416a1a385cd7fad73, server=7adf9b3d9d04,40707,1731671091491 because future has completed 2024-11-15T11:44:55,251 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-15T11:44:55,254 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 269f5330e3b0cfe416a1a385cd7fad73, server=7adf9b3d9d04,40707,1731671091491 in 208 msec 2024-11-15T11:44:55,258 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-15T11:44:55,258 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=269f5330e3b0cfe416a1a385cd7fad73, ASSIGN in 382 msec 2024-11-15T11:44:55,260 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-15T11:44:55,260 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731671095260"}]},"ts":"1731671095260"} 2024-11-15T11:44:55,264 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-15T11:44:55,266 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-15T11:44:55,271 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 511 msec 2024-11-15T11:44:59,795 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-15T11:44:59,844 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-15T11:44:59,845 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-15T11:45:01,017 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-15T11:45:01,017 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering 
RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-15T11:45:01,019 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-15T11:45:01,019 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-15T11:45:01,020 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T11:45:01,020 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-15T11:45:01,020 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-15T11:45:01,020 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-15T11:45:04,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38813 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-15T11:45:04,864 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-15T11:45:04,869 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-15T11:45:04,876 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-15T11:45:04,877 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1731671094749.269f5330e3b0cfe416a1a385cd7fad73. 
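With the CREATE operation reported complete, the test begins writing data; the flush further down shows cells keyed like row0001/info:/... of roughly 1 KB each. A hedged sketch of the kind of writes that produce such cells; the exact row count, qualifier, and value size used by the test are not visible in the log, so the ones below are assumptions.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriteRowsSketch {
        public static void main(String[] args) throws Exception {
            TableName name = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
            byte[] family = Bytes.toBytes("info");
            byte[] value = new byte[1024]; // ~1 KB values; with an 8 KB flush size a few rows fill the memstore
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Table table = conn.getTable(name)) {
                for (int i = 1; i <= 7; i++) {
                    Put put = new Put(Bytes.toBytes(String.format("row%04d", i)));
                    // Empty qualifier, mirroring the "row0001/info:/..." keys in the flush output below.
                    put.addColumn(family, Bytes.toBytes(""), value);
                    table.put(put);
                }
            }
        }
    }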
2024-11-15T11:45:04,877 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7adf9b3d9d04%2C40707%2C1731671091491.1731671104877 2024-11-15T11:45:04,887 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:45:04,887 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:45:04,887 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:45:04,887 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:45:04,887 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:45:04,888 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/WALs/7adf9b3d9d04,40707,1731671091491/7adf9b3d9d04%2C40707%2C1731671091491.1731671093770 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/WALs/7adf9b3d9d04,40707,1731671091491/7adf9b3d9d04%2C40707%2C1731671091491.1731671104877 2024-11-15T11:45:04,889 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44421:44421),(127.0.0.1/127.0.0.1:43409:43409)] 2024-11-15T11:45:04,889 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/WALs/7adf9b3d9d04,40707,1731671091491/7adf9b3d9d04%2C40707%2C1731671091491.1731671093770 is not closed yet, will try archiving it next time 2024-11-15T11:45:04,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41851 is added to blk_1073741833_1009 (size=451) 2024-11-15T11:45:04,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45289 is added to blk_1073741833_1009 (size=451) 2024-11-15T11:45:04,895 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/WALs/7adf9b3d9d04,40707,1731671091491/7adf9b3d9d04%2C40707%2C1731671091491.1731671093770 to hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/oldWALs/7adf9b3d9d04%2C40707%2C1731671091491.1731671093770 2024-11-15T11:45:04,898 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1731671094749.269f5330e3b0cfe416a1a385cd7fad73., hostname=7adf9b3d9d04,40707,1731671091491, seqNum=2] 2024-11-15T11:45:16,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40707 {}] regionserver.HRegion(8855): Flush requested on 269f5330e3b0cfe416a1a385cd7fad73 2024-11-15T11:45:16,947 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 269f5330e3b0cfe416a1a385cd7fad73 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-15T11:45:17,006 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/default/TestLogRolling-testSlowSyncLogRolling/269f5330e3b0cfe416a1a385cd7fad73/.tmp/info/31d74f4a23c744a0bf418c1693841aa0 is 1080, key is row0001/info:/1731671104901/Put/seqid=0 2024-11-15T11:45:17,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45289 is added to blk_1073741838_1014 (size=12509) 2024-11-15T11:45:17,019 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41851 is added to blk_1073741838_1014 (size=12509) 2024-11-15T11:45:17,020 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/default/TestLogRolling-testSlowSyncLogRolling/269f5330e3b0cfe416a1a385cd7fad73/.tmp/info/31d74f4a23c744a0bf418c1693841aa0 2024-11-15T11:45:17,066 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/default/TestLogRolling-testSlowSyncLogRolling/269f5330e3b0cfe416a1a385cd7fad73/.tmp/info/31d74f4a23c744a0bf418c1693841aa0 as hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/default/TestLogRolling-testSlowSyncLogRolling/269f5330e3b0cfe416a1a385cd7fad73/info/31d74f4a23c744a0bf418c1693841aa0 2024-11-15T11:45:17,078 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/default/TestLogRolling-testSlowSyncLogRolling/269f5330e3b0cfe416a1a385cd7fad73/info/31d74f4a23c744a0bf418c1693841aa0, entries=7, sequenceid=11, filesize=12.2 K 2024-11-15T11:45:17,085 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 269f5330e3b0cfe416a1a385cd7fad73 in 139ms, sequenceid=11, compaction requested=false 2024-11-15T11:45:17,087 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 269f5330e3b0cfe416a1a385cd7fad73: 2024-11-15T11:45:19,623 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
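The flush recorded above is a two-step commit: the memstore snapshot is first written as a new HFile under the region's .tmp directory, and only then moved into the info column-family directory, so readers never observe a half-written store file. Below is a minimal sketch of that commit step using the Hadoop FileSystem API; it assumes a plain rename is sufficient, while the real HRegionFileSystem performs additional validation before and after the move.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import java.io.IOException;

public class FlushCommitSketch {
  /**
   * Move a freshly written store file from the region's .tmp area into the live
   * column-family directory, mirroring the "Committing .tmp/info/... as info/..."
   * step in the log. Error handling and validation are simplified for illustration.
   */
  static Path commitStoreFile(FileSystem fs, Path regionDir, String family, Path tmpFile)
      throws IOException {
    Path familyDir = new Path(regionDir, family);
    if (!fs.exists(familyDir)) {
      fs.mkdirs(familyDir);
    }
    Path dst = new Path(familyDir, tmpFile.getName()); // keep the generated file name
    if (!fs.rename(tmpFile, dst)) {                    // the rename is the commit point
      throw new IOException("Failed to commit " + tmpFile + " as " + dst);
    }
    return dst;
  }

  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    Path regionDir = new Path(args[0]);  // e.g. .../data/default/<table>/<encoded-region>
    Path tmpFile = new Path(args[1]);    // e.g. <regionDir>/.tmp/info/<generated-name>
    System.out.println("Committed to " + commitStoreFile(fs, regionDir, "info", tmpFile));
  }
}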
2024-11-15T11:45:24,956 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7adf9b3d9d04%2C40707%2C1731671091491.1731671124956 2024-11-15T11:45:25,169 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 210 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41851,DS-10225212-64ae-479a-8297-cbe5feae3f81,DISK], DatanodeInfoWithStorage[127.0.0.1:45289,DS-505d59eb-50ee-40e2-ba01-c9456fc0b4bf,DISK]] 2024-11-15T11:45:25,170 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:45:25,170 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:45:25,170 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:45:25,170 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:45:25,170 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:45:25,171 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/WALs/7adf9b3d9d04,40707,1731671091491/7adf9b3d9d04%2C40707%2C1731671091491.1731671104877 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/WALs/7adf9b3d9d04,40707,1731671091491/7adf9b3d9d04%2C40707%2C1731671091491.1731671124956 2024-11-15T11:45:25,172 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43409:43409),(127.0.0.1/127.0.0.1:44421:44421)] 2024-11-15T11:45:25,172 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/WALs/7adf9b3d9d04,40707,1731671091491/7adf9b3d9d04%2C40707%2C1731671091491.1731671104877 is not closed yet, will try archiving it next time 2024-11-15T11:45:25,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45289 is added to blk_1073741837_1013 (size=12399) 2024-11-15T11:45:25,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41851 is added to blk_1073741837_1013 (size=12399) 2024-11-15T11:45:25,377 INFO [FSHLog-0-hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae-prefix:7adf9b3d9d04,40707,1731671091491 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45289,DS-505d59eb-50ee-40e2-ba01-c9456fc0b4bf,DISK], DatanodeInfoWithStorage[127.0.0.1:41851,DS-10225212-64ae-479a-8297-cbe5feae3f81,DISK]] 2024-11-15T11:45:27,582 INFO [FSHLog-0-hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae-prefix:7adf9b3d9d04,40707,1731671091491 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45289,DS-505d59eb-50ee-40e2-ba01-c9456fc0b4bf,DISK], DatanodeInfoWithStorage[127.0.0.1:41851,DS-10225212-64ae-479a-8297-cbe5feae3f81,DISK]] 2024-11-15T11:45:29,788 INFO [FSHLog-0-hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae-prefix:7adf9b3d9d04,40707,1731671091491 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45289,DS-505d59eb-50ee-40e2-ba01-c9456fc0b4bf,DISK], DatanodeInfoWithStorage[127.0.0.1:41851,DS-10225212-64ae-479a-8297-cbe5feae3f81,DISK]] 2024-11-15T11:45:31,994 INFO [FSHLog-0-hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae-prefix:7adf9b3d9d04,40707,1731671091491 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45289,DS-505d59eb-50ee-40e2-ba01-c9456fc0b4bf,DISK], DatanodeInfoWithStorage[127.0.0.1:41851,DS-10225212-64ae-479a-8297-cbe5feae3f81,DISK]] 2024-11-15T11:45:31,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40707 {}] regionserver.HRegion(8855): Flush requested on 269f5330e3b0cfe416a1a385cd7fad73 2024-11-15T11:45:31,994 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 269f5330e3b0cfe416a1a385cd7fad73 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-15T11:45:32,196 INFO [FSHLog-0-hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae-prefix:7adf9b3d9d04,40707,1731671091491 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45289,DS-505d59eb-50ee-40e2-ba01-c9456fc0b4bf,DISK], DatanodeInfoWithStorage[127.0.0.1:41851,DS-10225212-64ae-479a-8297-cbe5feae3f81,DISK]] 2024-11-15T11:45:32,206 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/default/TestLogRolling-testSlowSyncLogRolling/269f5330e3b0cfe416a1a385cd7fad73/.tmp/info/fdff74d9af524490b2b7ff9b65ec1b22 is 1080, key is row0008/info:/1731671118945/Put/seqid=0 2024-11-15T11:45:32,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45289 is added to blk_1073741840_1016 (size=12509) 2024-11-15T11:45:32,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41851 is added to blk_1073741840_1016 (size=12509) 2024-11-15T11:45:32,215 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/default/TestLogRolling-testSlowSyncLogRolling/269f5330e3b0cfe416a1a385cd7fad73/.tmp/info/fdff74d9af524490b2b7ff9b65ec1b22 2024-11-15T11:45:32,228 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/default/TestLogRolling-testSlowSyncLogRolling/269f5330e3b0cfe416a1a385cd7fad73/.tmp/info/fdff74d9af524490b2b7ff9b65ec1b22 as hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/default/TestLogRolling-testSlowSyncLogRolling/269f5330e3b0cfe416a1a385cd7fad73/info/fdff74d9af524490b2b7ff9b65ec1b22 2024-11-15T11:45:32,240 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/default/TestLogRolling-testSlowSyncLogRolling/269f5330e3b0cfe416a1a385cd7fad73/info/fdff74d9af524490b2b7ff9b65ec1b22, entries=7, sequenceid=21, filesize=12.2 K 2024-11-15T11:45:32,443 INFO [FSHLog-0-hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae-prefix:7adf9b3d9d04,40707,1731671091491 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45289,DS-505d59eb-50ee-40e2-ba01-c9456fc0b4bf,DISK], DatanodeInfoWithStorage[127.0.0.1:41851,DS-10225212-64ae-479a-8297-cbe5feae3f81,DISK]] 2024-11-15T11:45:32,443 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 269f5330e3b0cfe416a1a385cd7fad73 in 
449ms, sequenceid=21, compaction requested=false 2024-11-15T11:45:32,444 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 269f5330e3b0cfe416a1a385cd7fad73: 2024-11-15T11:45:32,444 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-15T11:45:32,444 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T11:45:32,447 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/default/TestLogRolling-testSlowSyncLogRolling/269f5330e3b0cfe416a1a385cd7fad73/info/31d74f4a23c744a0bf418c1693841aa0 because midkey is the same as first or last row 2024-11-15T11:45:34,198 INFO [FSHLog-0-hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae-prefix:7adf9b3d9d04,40707,1731671091491 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45289,DS-505d59eb-50ee-40e2-ba01-c9456fc0b4bf,DISK], DatanodeInfoWithStorage[127.0.0.1:41851,DS-10225212-64ae-479a-8297-cbe5feae3f81,DISK]] 2024-11-15T11:45:35,198 INFO [master/7adf9b3d9d04:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-15T11:45:35,198 INFO [master/7adf9b3d9d04:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-15T11:45:36,402 INFO [FSHLog-0-hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae-prefix:7adf9b3d9d04,40707,1731671091491 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45289,DS-505d59eb-50ee-40e2-ba01-c9456fc0b4bf,DISK], DatanodeInfoWithStorage[127.0.0.1:41851,DS-10225212-64ae-479a-8297-cbe5feae3f81,DISK]] 2024-11-15T11:45:36,405 WARN [FSHLog-0-hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae-prefix:7adf9b3d9d04,40707,1731671091491 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45289,DS-505d59eb-50ee-40e2-ba01-c9456fc0b4bf,DISK], DatanodeInfoWithStorage[127.0.0.1:41851,DS-10225212-64ae-479a-8297-cbe5feae3f81,DISK]] 2024-11-15T11:45:36,406 DEBUG [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7adf9b3d9d04%2C40707%2C1731671091491:(num 1731671124956) roll requested 2024-11-15T11:45:36,407 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7adf9b3d9d04%2C40707%2C1731671091491.1731671136406 2024-11-15T11:45:36,629 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 220 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45289,DS-505d59eb-50ee-40e2-ba01-c9456fc0b4bf,DISK], DatanodeInfoWithStorage[127.0.0.1:41851,DS-10225212-64ae-479a-8297-cbe5feae3f81,DISK]] 2024-11-15T11:45:36,630 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:45:36,630 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:45:36,630 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:45:36,630 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:45:36,630 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
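The roll request above spells out the WAL's slow-sync policy: sync times are measured, anything unusually slow (the roughly 200 ms syncs here) is logged and counted, and a roll is requested either when the count passes a limit (count=8, threshold=5) or, as later entries show, when a single sync exceeds a hard ceiling (threshold=5000 ms). The sketch below reproduces only that decision logic with the thresholds visible in this log; it is an illustration, not the AbstractFSWAL implementation, which does additional bookkeeping around when the counter is reset.

import java.util.concurrent.atomic.AtomicInteger;

public class SlowSyncRollTracker {
  // Thresholds as seen in this log; real servers read them from configuration.
  private final long slowSyncMs;        // syncs slower than this are logged and counted
  private final int slowSyncCountLimit; // request a roll past this many (log: threshold=5)
  private final long rollOnSyncMs;      // request a roll immediately past this (log: 5000 ms)
  private final AtomicInteger slowSyncCount = new AtomicInteger();

  public SlowSyncRollTracker(long slowSyncMs, int slowSyncCountLimit, long rollOnSyncMs) {
    this.slowSyncMs = slowSyncMs;
    this.slowSyncCountLimit = slowSyncCountLimit;
    this.rollOnSyncMs = rollOnSyncMs;
  }

  /** Record one sync's duration; returns true when a log roll should be requested. */
  public boolean recordSync(long costMs) {
    if (costMs > rollOnSyncMs) {
      System.out.println("Requesting log roll: single sync took " + costMs + " ms");
      return true;
    }
    if (costMs > slowSyncMs) {
      System.out.println("Slow sync cost: " + costMs + " ms");
      if (slowSyncCount.incrementAndGet() > slowSyncCountLimit) {
        System.out.println("Requesting log roll: more than " + slowSyncCountLimit
            + " slow syncs on the current WAL");
        return true;
      }
    }
    return false;
  }

  /** Call after the WAL actually rolls so counting starts over on the new file. */
  public void onRoll() {
    slowSyncCount.set(0);
  }
}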
2024-11-15T11:45:36,631 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/WALs/7adf9b3d9d04,40707,1731671091491/7adf9b3d9d04%2C40707%2C1731671091491.1731671124956 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/WALs/7adf9b3d9d04,40707,1731671091491/7adf9b3d9d04%2C40707%2C1731671091491.1731671136406 2024-11-15T11:45:36,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41851 is added to blk_1073741839_1015 (size=7739) 2024-11-15T11:45:36,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45289 is added to blk_1073741839_1015 (size=7739) 2024-11-15T11:45:36,660 DEBUG [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43409:43409),(127.0.0.1/127.0.0.1:44421:44421)] 2024-11-15T11:45:36,660 DEBUG [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/WALs/7adf9b3d9d04,40707,1731671091491/7adf9b3d9d04%2C40707%2C1731671091491.1731671124956 is not closed yet, will try archiving it next time 2024-11-15T11:45:36,660 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/WALs/7adf9b3d9d04,40707,1731671091491/7adf9b3d9d04%2C40707%2C1731671091491.1731671104877 to hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/oldWALs/7adf9b3d9d04%2C40707%2C1731671091491.1731671104877 2024-11-15T11:45:38,608 INFO [FSHLog-0-hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae-prefix:7adf9b3d9d04,40707,1731671091491 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45289,DS-505d59eb-50ee-40e2-ba01-c9456fc0b4bf,DISK], DatanodeInfoWithStorage[127.0.0.1:41851,DS-10225212-64ae-479a-8297-cbe5feae3f81,DISK]] 2024-11-15T11:45:40,208 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 269f5330e3b0cfe416a1a385cd7fad73, had cached 0 bytes from a total of 25018 2024-11-15T11:45:40,814 INFO [FSHLog-0-hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae-prefix:7adf9b3d9d04,40707,1731671091491 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45289,DS-505d59eb-50ee-40e2-ba01-c9456fc0b4bf,DISK], DatanodeInfoWithStorage[127.0.0.1:41851,DS-10225212-64ae-479a-8297-cbe5feae3f81,DISK]] 2024-11-15T11:45:43,024 INFO [FSHLog-0-hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae-prefix:7adf9b3d9d04,40707,1731671091491 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45289,DS-505d59eb-50ee-40e2-ba01-c9456fc0b4bf,DISK], DatanodeInfoWithStorage[127.0.0.1:41851,DS-10225212-64ae-479a-8297-cbe5feae3f81,DISK]] 2024-11-15T11:45:45,233 INFO [FSHLog-0-hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae-prefix:7adf9b3d9d04,40707,1731671091491 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45289,DS-505d59eb-50ee-40e2-ba01-c9456fc0b4bf,DISK], 
DatanodeInfoWithStorage[127.0.0.1:41851,DS-10225212-64ae-479a-8297-cbe5feae3f81,DISK]] 2024-11-15T11:45:47,236 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T11:45:47,236 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7adf9b3d9d04%2C40707%2C1731671091491.1731671147236 2024-11-15T11:45:49,623 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-15T11:45:52,246 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45289,DS-505d59eb-50ee-40e2-ba01-c9456fc0b4bf,DISK], DatanodeInfoWithStorage[127.0.0.1:41851,DS-10225212-64ae-479a-8297-cbe5feae3f81,DISK]] 2024-11-15T11:45:52,248 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45289,DS-505d59eb-50ee-40e2-ba01-c9456fc0b4bf,DISK], DatanodeInfoWithStorage[127.0.0.1:41851,DS-10225212-64ae-479a-8297-cbe5feae3f81,DISK]] 2024-11-15T11:45:52,248 DEBUG [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7adf9b3d9d04%2C40707%2C1731671091491:(num 1731671147236) roll requested 2024-11-15T11:45:52,248 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:45:52,248 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:45:52,249 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:45:52,249 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:45:52,249 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:45:52,249 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/WALs/7adf9b3d9d04,40707,1731671091491/7adf9b3d9d04%2C40707%2C1731671091491.1731671136406 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/WALs/7adf9b3d9d04,40707,1731671091491/7adf9b3d9d04%2C40707%2C1731671091491.1731671147236 2024-11-15T11:45:52,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41851 is added to blk_1073741841_1017 (size=4753) 2024-11-15T11:45:52,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45289 is added to blk_1073741841_1017 (size=4753) 2024-11-15T11:45:52,259 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43409:43409),(127.0.0.1/127.0.0.1:44421:44421)] 2024-11-15T11:45:52,260 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/WALs/7adf9b3d9d04,40707,1731671091491/7adf9b3d9d04%2C40707%2C1731671091491.1731671136406 is not closed yet, will try archiving it next time 2024-11-15T11:45:52,260 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7adf9b3d9d04%2C40707%2C1731671091491.1731671152260 2024-11-15T11:45:57,263 INFO [FSHLog-0-hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae-prefix:7adf9b3d9d04,40707,1731671091491 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:45289,DS-505d59eb-50ee-40e2-ba01-c9456fc0b4bf,DISK], DatanodeInfoWithStorage[127.0.0.1:41851,DS-10225212-64ae-479a-8297-cbe5feae3f81,DISK]] 2024-11-15T11:45:57,263 WARN [FSHLog-0-hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae-prefix:7adf9b3d9d04,40707,1731671091491 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45289,DS-505d59eb-50ee-40e2-ba01-c9456fc0b4bf,DISK], DatanodeInfoWithStorage[127.0.0.1:41851,DS-10225212-64ae-479a-8297-cbe5feae3f81,DISK]] 2024-11-15T11:45:57,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40707 {}] regionserver.HRegion(8855): Flush requested on 269f5330e3b0cfe416a1a385cd7fad73 2024-11-15T11:45:57,263 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 269f5330e3b0cfe416a1a385cd7fad73 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-15T11:45:57,268 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45289,DS-505d59eb-50ee-40e2-ba01-c9456fc0b4bf,DISK], DatanodeInfoWithStorage[127.0.0.1:41851,DS-10225212-64ae-479a-8297-cbe5feae3f81,DISK]] 2024-11-15T11:45:57,268 WARN [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45289,DS-505d59eb-50ee-40e2-ba01-c9456fc0b4bf,DISK], DatanodeInfoWithStorage[127.0.0.1:41851,DS-10225212-64ae-479a-8297-cbe5feae3f81,DISK]] 2024-11-15T11:45:59,264 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T11:46:02,266 INFO [FSHLog-0-hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae-prefix:7adf9b3d9d04,40707,1731671091491 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45289,DS-505d59eb-50ee-40e2-ba01-c9456fc0b4bf,DISK], DatanodeInfoWithStorage[127.0.0.1:41851,DS-10225212-64ae-479a-8297-cbe5feae3f81,DISK]] 2024-11-15T11:46:02,266 WARN [FSHLog-0-hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae-prefix:7adf9b3d9d04,40707,1731671091491 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45289,DS-505d59eb-50ee-40e2-ba01-c9456fc0b4bf,DISK], DatanodeInfoWithStorage[127.0.0.1:41851,DS-10225212-64ae-479a-8297-cbe5feae3f81,DISK]] 2024-11-15T11:46:02,266 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:02,266 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:02,267 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:02,267 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:02,267 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:02,267 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/WALs/7adf9b3d9d04,40707,1731671091491/7adf9b3d9d04%2C40707%2C1731671091491.1731671147236 with entries=2, filesize=1.52 KB; new WAL 
/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/WALs/7adf9b3d9d04,40707,1731671091491/7adf9b3d9d04%2C40707%2C1731671091491.1731671152260 2024-11-15T11:46:02,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41851 is added to blk_1073741842_1018 (size=1569) 2024-11-15T11:46:02,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45289 is added to blk_1073741842_1018 (size=1569) 2024-11-15T11:46:02,285 DEBUG [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44421:44421),(127.0.0.1/127.0.0.1:43409:43409)] 2024-11-15T11:46:02,285 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/default/TestLogRolling-testSlowSyncLogRolling/269f5330e3b0cfe416a1a385cd7fad73/.tmp/info/cf29f0901685493890e2f7fe0626de9b is 1080, key is row0015/info:/1731671133996/Put/seqid=0 2024-11-15T11:46:02,285 DEBUG [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7adf9b3d9d04%2C40707%2C1731671091491:(num 1731671152260) roll requested 2024-11-15T11:46:02,286 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7adf9b3d9d04%2C40707%2C1731671091491.1731671162286 2024-11-15T11:46:02,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41851 is added to blk_1073741844_1020 (size=12509) 2024-11-15T11:46:02,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45289 is added to blk_1073741844_1020 (size=12509) 2024-11-15T11:46:02,293 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/default/TestLogRolling-testSlowSyncLogRolling/269f5330e3b0cfe416a1a385cd7fad73/.tmp/info/cf29f0901685493890e2f7fe0626de9b 2024-11-15T11:46:02,310 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/default/TestLogRolling-testSlowSyncLogRolling/269f5330e3b0cfe416a1a385cd7fad73/.tmp/info/cf29f0901685493890e2f7fe0626de9b as hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/default/TestLogRolling-testSlowSyncLogRolling/269f5330e3b0cfe416a1a385cd7fad73/info/cf29f0901685493890e2f7fe0626de9b 2024-11-15T11:46:02,321 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/default/TestLogRolling-testSlowSyncLogRolling/269f5330e3b0cfe416a1a385cd7fad73/info/cf29f0901685493890e2f7fe0626de9b, entries=7, sequenceid=31, filesize=12.2 K 2024-11-15T11:46:07,298 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5010 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41851,DS-10225212-64ae-479a-8297-cbe5feae3f81,DISK], DatanodeInfoWithStorage[127.0.0.1:45289,DS-505d59eb-50ee-40e2-ba01-c9456fc0b4bf,DISK]] 2024-11-15T11:46:07,298 WARN [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5010 ms, threshold=5000 ms, 
current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41851,DS-10225212-64ae-479a-8297-cbe5feae3f81,DISK], DatanodeInfoWithStorage[127.0.0.1:45289,DS-505d59eb-50ee-40e2-ba01-c9456fc0b4bf,DISK]] 2024-11-15T11:46:07,323 INFO [FSHLog-0-hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae-prefix:7adf9b3d9d04,40707,1731671091491 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41851,DS-10225212-64ae-479a-8297-cbe5feae3f81,DISK], DatanodeInfoWithStorage[127.0.0.1:45289,DS-505d59eb-50ee-40e2-ba01-c9456fc0b4bf,DISK]] 2024-11-15T11:46:07,323 WARN [FSHLog-0-hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae-prefix:7adf9b3d9d04,40707,1731671091491 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41851,DS-10225212-64ae-479a-8297-cbe5feae3f81,DISK], DatanodeInfoWithStorage[127.0.0.1:45289,DS-505d59eb-50ee-40e2-ba01-c9456fc0b4bf,DISK]] 2024-11-15T11:46:07,323 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 269f5330e3b0cfe416a1a385cd7fad73 in 10060ms, sequenceid=31, compaction requested=true 2024-11-15T11:46:07,323 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:07,323 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 269f5330e3b0cfe416a1a385cd7fad73: 2024-11-15T11:46:07,323 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:07,323 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:07,323 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-15T11:46:07,323 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:07,323 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T11:46:07,324 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:07,324 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/default/TestLogRolling-testSlowSyncLogRolling/269f5330e3b0cfe416a1a385cd7fad73/info/31d74f4a23c744a0bf418c1693841aa0 because midkey is the same as first or last row 2024-11-15T11:46:07,324 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/WALs/7adf9b3d9d04,40707,1731671091491/7adf9b3d9d04%2C40707%2C1731671091491.1731671152260 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/WALs/7adf9b3d9d04,40707,1731671091491/7adf9b3d9d04%2C40707%2C1731671091491.1731671162286 2024-11-15T11:46:07,325 DEBUG [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43409:43409),(127.0.0.1/127.0.0.1:44421:44421)] 2024-11-15T11:46:07,325 DEBUG [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/WALs/7adf9b3d9d04,40707,1731671091491/7adf9b3d9d04%2C40707%2C1731671091491.1731671152260 is not closed yet, will try archiving it next time 2024-11-15T11:46:07,325 DEBUG 
[regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7adf9b3d9d04%2C40707%2C1731671091491:(num 1731671162286) roll requested 2024-11-15T11:46:07,325 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/WALs/7adf9b3d9d04,40707,1731671091491/7adf9b3d9d04%2C40707%2C1731671091491.1731671124956 to hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/oldWALs/7adf9b3d9d04%2C40707%2C1731671091491.1731671124956 2024-11-15T11:46:07,325 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7adf9b3d9d04%2C40707%2C1731671091491.1731671167325 2024-11-15T11:46:07,326 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 269f5330e3b0cfe416a1a385cd7fad73:info, priority=-2147483648, current under compaction store size is 1 2024-11-15T11:46:07,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45289 is added to blk_1073741843_1019 (size=438) 2024-11-15T11:46:07,327 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/WALs/7adf9b3d9d04,40707,1731671091491/7adf9b3d9d04%2C40707%2C1731671091491.1731671136406 to hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/oldWALs/7adf9b3d9d04%2C40707%2C1731671091491.1731671136406 2024-11-15T11:46:07,328 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T11:46:07,329 DEBUG [RS:0;7adf9b3d9d04:40707-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-15T11:46:07,329 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/WALs/7adf9b3d9d04,40707,1731671091491/7adf9b3d9d04%2C40707%2C1731671091491.1731671147236 to hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/oldWALs/7adf9b3d9d04%2C40707%2C1731671091491.1731671147236 2024-11-15T11:46:07,331 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/WALs/7adf9b3d9d04,40707,1731671091491/7adf9b3d9d04%2C40707%2C1731671091491.1731671152260 to hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/oldWALs/7adf9b3d9d04%2C40707%2C1731671091491.1731671152260 2024-11-15T11:46:07,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41851 is added to blk_1073741843_1019 (size=438) 2024-11-15T11:46:07,335 DEBUG [RS:0;7adf9b3d9d04:40707-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-15T11:46:07,336 DEBUG [RS:0;7adf9b3d9d04:40707-shortCompactions-0 {}] regionserver.HStore(1541): 269f5330e3b0cfe416a1a385cd7fad73/info is initiating minor compaction (all files) 2024-11-15T11:46:07,336 INFO [RS:0;7adf9b3d9d04:40707-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 269f5330e3b0cfe416a1a385cd7fad73/info in 
TestLogRolling-testSlowSyncLogRolling,,1731671094749.269f5330e3b0cfe416a1a385cd7fad73. 2024-11-15T11:46:07,337 INFO [RS:0;7adf9b3d9d04:40707-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/default/TestLogRolling-testSlowSyncLogRolling/269f5330e3b0cfe416a1a385cd7fad73/info/31d74f4a23c744a0bf418c1693841aa0, hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/default/TestLogRolling-testSlowSyncLogRolling/269f5330e3b0cfe416a1a385cd7fad73/info/fdff74d9af524490b2b7ff9b65ec1b22, hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/default/TestLogRolling-testSlowSyncLogRolling/269f5330e3b0cfe416a1a385cd7fad73/info/cf29f0901685493890e2f7fe0626de9b] into tmpdir=hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/default/TestLogRolling-testSlowSyncLogRolling/269f5330e3b0cfe416a1a385cd7fad73/.tmp, totalSize=36.6 K 2024-11-15T11:46:07,338 DEBUG [RS:0;7adf9b3d9d04:40707-shortCompactions-0 {}] compactions.Compactor(225): Compacting 31d74f4a23c744a0bf418c1693841aa0, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731671104901 2024-11-15T11:46:07,339 DEBUG [RS:0;7adf9b3d9d04:40707-shortCompactions-0 {}] compactions.Compactor(225): Compacting fdff74d9af524490b2b7ff9b65ec1b22, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1731671118945 2024-11-15T11:46:07,340 DEBUG [RS:0;7adf9b3d9d04:40707-shortCompactions-0 {}] compactions.Compactor(225): Compacting cf29f0901685493890e2f7fe0626de9b, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1731671133996 2024-11-15T11:46:07,340 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:07,340 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:07,340 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:07,340 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:07,341 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:07,341 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/WALs/7adf9b3d9d04,40707,1731671091491/7adf9b3d9d04%2C40707%2C1731671091491.1731671162286 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/WALs/7adf9b3d9d04,40707,1731671091491/7adf9b3d9d04%2C40707%2C1731671091491.1731671167325 2024-11-15T11:46:07,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41851 is added to blk_1073741845_1021 (size=93) 2024-11-15T11:46:07,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45289 is added to blk_1073741845_1021 (size=93) 2024-11-15T11:46:07,344 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/WALs/7adf9b3d9d04,40707,1731671091491/7adf9b3d9d04%2C40707%2C1731671091491.1731671162286 to hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/oldWALs/7adf9b3d9d04%2C40707%2C1731671091491.1731671162286 2024-11-15T11:46:07,347 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: 
[(127.0.0.1/127.0.0.1:43409:43409),(127.0.0.1/127.0.0.1:44421:44421)] 2024-11-15T11:46:07,348 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7adf9b3d9d04%2C40707%2C1731671091491.1731671167348 2024-11-15T11:46:07,356 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:07,356 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:07,356 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:07,356 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:07,357 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:07,357 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/WALs/7adf9b3d9d04,40707,1731671091491/7adf9b3d9d04%2C40707%2C1731671091491.1731671167325 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/WALs/7adf9b3d9d04,40707,1731671091491/7adf9b3d9d04%2C40707%2C1731671091491.1731671167348 2024-11-15T11:46:07,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41851 is added to blk_1073741846_1022 (size=1258) 2024-11-15T11:46:07,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45289 is added to blk_1073741846_1022 (size=1258) 2024-11-15T11:46:07,375 DEBUG [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43409:43409),(127.0.0.1/127.0.0.1:44421:44421)] 2024-11-15T11:46:07,380 INFO [RS:0;7adf9b3d9d04:40707-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 269f5330e3b0cfe416a1a385cd7fad73#info#compaction#3 average throughput is 21.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-15T11:46:07,382 DEBUG [RS:0;7adf9b3d9d04:40707-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/default/TestLogRolling-testSlowSyncLogRolling/269f5330e3b0cfe416a1a385cd7fad73/.tmp/info/16934a0c54b64e9a9fc8eb5a9a40c782 is 1080, key is row0001/info:/1731671104901/Put/seqid=0 2024-11-15T11:46:07,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45289 is added to blk_1073741848_1024 (size=27710) 2024-11-15T11:46:07,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41851 is added to blk_1073741848_1024 (size=27710) 2024-11-15T11:46:07,400 DEBUG [RS:0;7adf9b3d9d04:40707-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/default/TestLogRolling-testSlowSyncLogRolling/269f5330e3b0cfe416a1a385cd7fad73/.tmp/info/16934a0c54b64e9a9fc8eb5a9a40c782 as hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/default/TestLogRolling-testSlowSyncLogRolling/269f5330e3b0cfe416a1a385cd7fad73/info/16934a0c54b64e9a9fc8eb5a9a40c782 2024-11-15T11:46:07,418 INFO [RS:0;7adf9b3d9d04:40707-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 269f5330e3b0cfe416a1a385cd7fad73/info of 269f5330e3b0cfe416a1a385cd7fad73 into 16934a0c54b64e9a9fc8eb5a9a40c782(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-15T11:46:07,418 DEBUG [RS:0;7adf9b3d9d04:40707-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 269f5330e3b0cfe416a1a385cd7fad73: 2024-11-15T11:46:07,421 INFO [RS:0;7adf9b3d9d04:40707-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1731671094749.269f5330e3b0cfe416a1a385cd7fad73., storeName=269f5330e3b0cfe416a1a385cd7fad73/info, priority=13, startTime=1731671167325; duration=0sec 2024-11-15T11:46:07,421 DEBUG [RS:0;7adf9b3d9d04:40707-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-15T11:46:07,421 DEBUG [RS:0;7adf9b3d9d04:40707-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T11:46:07,422 DEBUG [RS:0;7adf9b3d9d04:40707-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/default/TestLogRolling-testSlowSyncLogRolling/269f5330e3b0cfe416a1a385cd7fad73/info/16934a0c54b64e9a9fc8eb5a9a40c782 because midkey is the same as first or last row 2024-11-15T11:46:07,422 DEBUG [RS:0;7adf9b3d9d04:40707-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-15T11:46:07,422 DEBUG [RS:0;7adf9b3d9d04:40707-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T11:46:07,422 DEBUG [RS:0;7adf9b3d9d04:40707-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/default/TestLogRolling-testSlowSyncLogRolling/269f5330e3b0cfe416a1a385cd7fad73/info/16934a0c54b64e9a9fc8eb5a9a40c782 because midkey is the same as first or last row 2024-11-15T11:46:07,422 DEBUG [RS:0;7adf9b3d9d04:40707-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-15T11:46:07,422 DEBUG [RS:0;7adf9b3d9d04:40707-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T11:46:07,422 DEBUG [RS:0;7adf9b3d9d04:40707-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/default/TestLogRolling-testSlowSyncLogRolling/269f5330e3b0cfe416a1a385cd7fad73/info/16934a0c54b64e9a9fc8eb5a9a40c782 because midkey is the same as first or last row 2024-11-15T11:46:07,423 DEBUG [RS:0;7adf9b3d9d04:40707-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T11:46:07,423 DEBUG [RS:0;7adf9b3d9d04:40707-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 269f5330e3b0cfe416a1a385cd7fad73:info 2024-11-15T11:46:19,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40707 {}] regionserver.HRegion(8855): Flush requested on 269f5330e3b0cfe416a1a385cd7fad73 2024-11-15T11:46:19,379 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 269f5330e3b0cfe416a1a385cd7fad73 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-15T11:46:19,388 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/default/TestLogRolling-testSlowSyncLogRolling/269f5330e3b0cfe416a1a385cd7fad73/.tmp/info/29a76984e9754ec180c25fb717e81dbc is 1080, key is row0022/info:/1731671167349/Put/seqid=0 2024-11-15T11:46:19,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45289 is added to blk_1073741849_1025 (size=12509) 2024-11-15T11:46:19,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41851 is added to blk_1073741849_1025 (size=12509) 2024-11-15T11:46:19,396 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/default/TestLogRolling-testSlowSyncLogRolling/269f5330e3b0cfe416a1a385cd7fad73/.tmp/info/29a76984e9754ec180c25fb717e81dbc 2024-11-15T11:46:19,407 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/default/TestLogRolling-testSlowSyncLogRolling/269f5330e3b0cfe416a1a385cd7fad73/.tmp/info/29a76984e9754ec180c25fb717e81dbc as hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/default/TestLogRolling-testSlowSyncLogRolling/269f5330e3b0cfe416a1a385cd7fad73/info/29a76984e9754ec180c25fb717e81dbc 2024-11-15T11:46:19,416 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/default/TestLogRolling-testSlowSyncLogRolling/269f5330e3b0cfe416a1a385cd7fad73/info/29a76984e9754ec180c25fb717e81dbc, entries=7, sequenceid=42, filesize=12.2 K 2024-11-15T11:46:19,418 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 269f5330e3b0cfe416a1a385cd7fad73 in 39ms, sequenceid=42, compaction requested=false 2024-11-15T11:46:19,418 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 269f5330e3b0cfe416a1a385cd7fad73: 2024-11-15T11:46:19,418 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-15T11:46:19,418 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T11:46:19,418 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/default/TestLogRolling-testSlowSyncLogRolling/269f5330e3b0cfe416a1a385cd7fad73/info/16934a0c54b64e9a9fc8eb5a9a40c782 because midkey is the same as first or last row 2024-11-15T11:46:19,623 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-15T11:46:25,209 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 269f5330e3b0cfe416a1a385cd7fad73, had cached 0 bytes from a total of 40219 2024-11-15T11:46:27,390 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-15T11:46:27,391 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
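Before the shutdown begins, the 11:46:19 flush above runs the split checks one more time: the region is considered big enough (sumSize=39.3 K against a deliberately tiny sizeToCheck=16.0 K for this test), but the split is refused because the largest store file's midkey equals its first or last row, which would leave one daughter region empty. A compact sketch of those two gates follows; StoreFileInfo and its accessors are hypothetical stand-ins for the real store metadata, not HBase classes.

import java.util.Arrays;
import java.util.Comparator;
import java.util.List;

public class SplitCheckSketch {
  /** Minimal view of a store file: only the fields the split decision needs. */
  record StoreFileInfo(long sizeBytes, byte[] firstRow, byte[] midKeyRow, byte[] lastRow) {}

  /**
   * Returns the split row, or null when the region should not split. sizeToCheck
   * plays the role of the policy's configured threshold (16 KB in this test run;
   * production defaults are in the gigabytes).
   */
  static byte[] findSplitRow(List<StoreFileInfo> storeFiles, long sizeToCheck) {
    long sumSize = storeFiles.stream().mapToLong(StoreFileInfo::sizeBytes).sum();
    if (sumSize <= sizeToCheck) {
      return null;                                 // the "big enough" test failed
    }
    // Split at the midkey of the largest file, but only if that midkey actually
    // divides it; otherwise the log prints "cannot split ... because midkey is
    // the same as first or last row" and the region stays whole.
    StoreFileInfo largest = storeFiles.stream()
        .max(Comparator.comparingLong(StoreFileInfo::sizeBytes))
        .orElseThrow();
    byte[] mid = largest.midKeyRow();
    if (Arrays.equals(mid, largest.firstRow()) || Arrays.equals(mid, largest.lastRow())) {
      return null;
    }
    return mid;
  }
}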
2024-11-15T11:46:27,391 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T11:46:27,396 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:46:27,396 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:46:27,396 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
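The call stack above is the ordinary JUnit teardown path: the test's @After method calls HBaseTestingUtil.shutdownMiniCluster(), which first closes the shared async connection (the "Connection has been closed by Time-limited test" line) and then stops the region server, master, ZooKeeper and the mini DFS. A minimal sketch of the lifecycle pairing such a test relies on, assuming the hbase-testing-util dependency and JUnit 4 are on the classpath:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class MiniClusterLifecycleSketch {
  private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // Starts an in-process HDFS, ZooKeeper, HMaster and one region server.
    testUtil.startMiniCluster();
  }

  @Test
  public void smoke() {
    // Test bodies would run against the mini cluster's connection here.
  }

  @After
  public void tearDown() throws Exception {
    // The call visible in the stack trace above (AbstractTestLogRolling.tearDown):
    // closes client connections, then stops HBase, ZooKeeper and the mini DFS.
    testUtil.shutdownMiniCluster();
  }
}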
2024-11-15T11:46:27,396 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-15T11:46:27,396 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=2135136293, stopped=false 2024-11-15T11:46:27,397 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=7adf9b3d9d04,38813,1731671090565 2024-11-15T11:46:27,449 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38813-0x1013f9974e80000, quorum=127.0.0.1:52542, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T11:46:27,450 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T11:46:27,450 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38813-0x1013f9974e80000, quorum=127.0.0.1:52542, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:46:27,450 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40707-0x1013f9974e80001, quorum=127.0.0.1:52542, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T11:46:27,450 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40707-0x1013f9974e80001, quorum=127.0.0.1:52542, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:46:27,450 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-15T11:46:27,450 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T11:46:27,450 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:46:27,451 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '7adf9b3d9d04,40707,1731671091491' ***** 2024-11-15T11:46:27,451 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-15T11:46:27,451 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40707-0x1013f9974e80001, quorum=127.0.0.1:52542, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T11:46:27,451 INFO [RS:0;7adf9b3d9d04:40707 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-15T11:46:27,451 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38813-0x1013f9974e80000, quorum=127.0.0.1:52542, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T11:46:27,451 INFO [RS:0;7adf9b3d9d04:40707 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-15T11:46:27,451 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-15T11:46:27,451 INFO [RS:0;7adf9b3d9d04:40707 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-15T11:46:27,451 INFO [RS:0;7adf9b3d9d04:40707 {}] regionserver.HRegionServer(3091): Received CLOSE for 269f5330e3b0cfe416a1a385cd7fad73 2024-11-15T11:46:27,453 INFO [RS:0;7adf9b3d9d04:40707 {}] regionserver.HRegionServer(959): stopping server 7adf9b3d9d04,40707,1731671091491 2024-11-15T11:46:27,453 INFO [RS:0;7adf9b3d9d04:40707 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T11:46:27,453 INFO [RS:0;7adf9b3d9d04:40707 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;7adf9b3d9d04:40707. 
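The ZooKeeper events above show how the shutdown fans out: the master removes the /hbase/running znode, every ZKWatcher receives a NodeDeleted event for that path, the region server begins its STOPPING sequence, and each watcher is re-set on the now-absent znode. A bare-bones sketch of that watch-for-deletion pattern with the plain ZooKeeper client API follows; the quorum string, session timeout and error handling are placeholders rather than HBase's ZKWatcher code.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ClusterRunningWatchSketch implements Watcher {
  private static final String RUNNING_ZNODE = "/hbase/running";
  private final ZooKeeper zk;

  public ClusterRunningWatchSketch(String quorum) throws Exception {
    // The log's quorum is 127.0.0.1:52542; real code would take this from configuration.
    this.zk = new ZooKeeper(quorum, 30_000, this);
    zk.exists(RUNNING_ZNODE, this); // sets a watch even though the znode may not exist yet
  }

  @Override
  public void process(WatchedEvent event) {
    if (event.getType() == Event.EventType.NodeDeleted
        && RUNNING_ZNODE.equals(event.getPath())) {
      System.out.println("Cluster shutdown requested; beginning local stop sequence");
      // ... trigger the server's stop sequence here, as the region server does above ...
    }
    try {
      zk.exists(RUNNING_ZNODE, this); // ZooKeeper watches are one-shot, so re-register
    } catch (Exception e) {
      // the session is going away during shutdown; nothing useful to do
    }
  }
}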
2024-11-15T11:46:27,453 DEBUG [RS:0;7adf9b3d9d04:40707 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T11:46:27,454 DEBUG [RS:0;7adf9b3d9d04:40707 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:46:27,454 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 269f5330e3b0cfe416a1a385cd7fad73, disabling compactions & flushes 2024-11-15T11:46:27,454 INFO [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731671094749.269f5330e3b0cfe416a1a385cd7fad73. 2024-11-15T11:46:27,454 INFO [RS:0;7adf9b3d9d04:40707 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-15T11:46:27,454 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731671094749.269f5330e3b0cfe416a1a385cd7fad73. 2024-11-15T11:46:27,454 INFO [RS:0;7adf9b3d9d04:40707 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-15T11:46:27,454 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731671094749.269f5330e3b0cfe416a1a385cd7fad73. after waiting 0 ms 2024-11-15T11:46:27,454 INFO [RS:0;7adf9b3d9d04:40707 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-15T11:46:27,454 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731671094749.269f5330e3b0cfe416a1a385cd7fad73. 
2024-11-15T11:46:27,454 INFO [RS:0;7adf9b3d9d04:40707 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-15T11:46:27,454 INFO [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 269f5330e3b0cfe416a1a385cd7fad73 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-15T11:46:27,454 INFO [RS:0;7adf9b3d9d04:40707 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-15T11:46:27,454 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T11:46:27,454 DEBUG [RS:0;7adf9b3d9d04:40707 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 269f5330e3b0cfe416a1a385cd7fad73=TestLogRolling-testSlowSyncLogRolling,,1731671094749.269f5330e3b0cfe416a1a385cd7fad73.} 2024-11-15T11:46:27,454 INFO [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T11:46:27,455 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T11:46:27,455 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T11:46:27,455 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T11:46:27,455 DEBUG [RS:0;7adf9b3d9d04:40707 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 269f5330e3b0cfe416a1a385cd7fad73 2024-11-15T11:46:27,455 INFO [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-15T11:46:27,462 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/default/TestLogRolling-testSlowSyncLogRolling/269f5330e3b0cfe416a1a385cd7fad73/.tmp/info/1cb8c05d80394ecb80da3bff4a5f67ac is 1080, key is row0029/info:/1731671181381/Put/seqid=0 2024-11-15T11:46:27,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41851 is added to blk_1073741850_1026 (size=8193) 2024-11-15T11:46:27,477 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/hbase/meta/1588230740/.tmp/info/5ec33c1e291342e380f3cbcdd8bdf889 is 195, key is TestLogRolling-testSlowSyncLogRolling,,1731671094749.269f5330e3b0cfe416a1a385cd7fad73./info:regioninfo/1731671095240/Put/seqid=0 2024-11-15T11:46:27,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45289 is added to blk_1073741850_1026 (size=8193) 2024-11-15T11:46:27,479 INFO [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), 
to=hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/default/TestLogRolling-testSlowSyncLogRolling/269f5330e3b0cfe416a1a385cd7fad73/.tmp/info/1cb8c05d80394ecb80da3bff4a5f67ac 2024-11-15T11:46:27,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41851 is added to blk_1073741851_1027 (size=7016) 2024-11-15T11:46:27,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45289 is added to blk_1073741851_1027 (size=7016) 2024-11-15T11:46:27,489 INFO [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/hbase/meta/1588230740/.tmp/info/5ec33c1e291342e380f3cbcdd8bdf889 2024-11-15T11:46:27,491 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/default/TestLogRolling-testSlowSyncLogRolling/269f5330e3b0cfe416a1a385cd7fad73/.tmp/info/1cb8c05d80394ecb80da3bff4a5f67ac as hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/default/TestLogRolling-testSlowSyncLogRolling/269f5330e3b0cfe416a1a385cd7fad73/info/1cb8c05d80394ecb80da3bff4a5f67ac 2024-11-15T11:46:27,506 INFO [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/default/TestLogRolling-testSlowSyncLogRolling/269f5330e3b0cfe416a1a385cd7fad73/info/1cb8c05d80394ecb80da3bff4a5f67ac, entries=3, sequenceid=48, filesize=8.0 K 2024-11-15T11:46:27,508 INFO [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 269f5330e3b0cfe416a1a385cd7fad73 in 54ms, sequenceid=48, compaction requested=true 2024-11-15T11:46:27,511 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731671094749.269f5330e3b0cfe416a1a385cd7fad73.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/default/TestLogRolling-testSlowSyncLogRolling/269f5330e3b0cfe416a1a385cd7fad73/info/31d74f4a23c744a0bf418c1693841aa0, hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/default/TestLogRolling-testSlowSyncLogRolling/269f5330e3b0cfe416a1a385cd7fad73/info/fdff74d9af524490b2b7ff9b65ec1b22, hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/default/TestLogRolling-testSlowSyncLogRolling/269f5330e3b0cfe416a1a385cd7fad73/info/cf29f0901685493890e2f7fe0626de9b] to archive 2024-11-15T11:46:27,515 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731671094749.269f5330e3b0cfe416a1a385cd7fad73.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-15T11:46:27,519 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731671094749.269f5330e3b0cfe416a1a385cd7fad73.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/default/TestLogRolling-testSlowSyncLogRolling/269f5330e3b0cfe416a1a385cd7fad73/info/31d74f4a23c744a0bf418c1693841aa0 to hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/archive/data/default/TestLogRolling-testSlowSyncLogRolling/269f5330e3b0cfe416a1a385cd7fad73/info/31d74f4a23c744a0bf418c1693841aa0 2024-11-15T11:46:27,521 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/hbase/meta/1588230740/.tmp/ns/f0bfb4cfb57b4a8496e12f33e547c860 is 43, key is default/ns:d/1731671094508/Put/seqid=0 2024-11-15T11:46:27,523 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731671094749.269f5330e3b0cfe416a1a385cd7fad73.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/default/TestLogRolling-testSlowSyncLogRolling/269f5330e3b0cfe416a1a385cd7fad73/info/fdff74d9af524490b2b7ff9b65ec1b22 to hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/archive/data/default/TestLogRolling-testSlowSyncLogRolling/269f5330e3b0cfe416a1a385cd7fad73/info/fdff74d9af524490b2b7ff9b65ec1b22 2024-11-15T11:46:27,526 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731671094749.269f5330e3b0cfe416a1a385cd7fad73.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/default/TestLogRolling-testSlowSyncLogRolling/269f5330e3b0cfe416a1a385cd7fad73/info/cf29f0901685493890e2f7fe0626de9b to hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/archive/data/default/TestLogRolling-testSlowSyncLogRolling/269f5330e3b0cfe416a1a385cd7fad73/info/cf29f0901685493890e2f7fe0626de9b 2024-11-15T11:46:27,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45289 is added to blk_1073741852_1028 (size=5153) 2024-11-15T11:46:27,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41851 is added to blk_1073741852_1028 (size=5153) 2024-11-15T11:46:27,532 INFO [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/hbase/meta/1588230740/.tmp/ns/f0bfb4cfb57b4a8496e12f33e547c860 2024-11-15T11:46:27,541 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731671094749.269f5330e3b0cfe416a1a385cd7fad73.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=7adf9b3d9d04:38813 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] 
at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-15T11:46:27,546 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731671094749.269f5330e3b0cfe416a1a385cd7fad73.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [31d74f4a23c744a0bf418c1693841aa0=12509, fdff74d9af524490b2b7ff9b65ec1b22=12509, cf29f0901685493890e2f7fe0626de9b=12509] 2024-11-15T11:46:27,552 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/default/TestLogRolling-testSlowSyncLogRolling/269f5330e3b0cfe416a1a385cd7fad73/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-15T11:46:27,555 INFO [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731671094749.269f5330e3b0cfe416a1a385cd7fad73. 2024-11-15T11:46:27,555 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 269f5330e3b0cfe416a1a385cd7fad73: Waiting for close lock at 1731671187453Running coprocessor pre-close hooks at 1731671187454 (+1 ms)Disabling compacts and flushes for region at 1731671187454Disabling writes for close at 1731671187454Obtaining lock to block concurrent updates at 1731671187454Preparing flush snapshotting stores in 269f5330e3b0cfe416a1a385cd7fad73 at 1731671187454Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1731671094749.269f5330e3b0cfe416a1a385cd7fad73., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1731671187455 (+1 ms)Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1731671094749.269f5330e3b0cfe416a1a385cd7fad73. at 1731671187456 (+1 ms)Flushing 269f5330e3b0cfe416a1a385cd7fad73/info: creating writer at 1731671187456Flushing 269f5330e3b0cfe416a1a385cd7fad73/info: appending metadata at 1731671187461 (+5 ms)Flushing 269f5330e3b0cfe416a1a385cd7fad73/info: closing flushed file at 1731671187461Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@276ad74a: reopening flushed file at 1731671187489 (+28 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 269f5330e3b0cfe416a1a385cd7fad73 in 54ms, sequenceid=48, compaction requested=true at 1731671187508 (+19 ms)Writing region close event to WAL at 1731671187547 (+39 ms)Running coprocessor post-close hooks at 1731671187553 (+6 ms)Closed at 1731671187555 (+2 ms) 2024-11-15T11:46:27,556 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1731671094749.269f5330e3b0cfe416a1a385cd7fad73. 
2024-11-15T11:46:27,559 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/hbase/meta/1588230740/.tmp/table/e230b77ae96341bab9bb6e68c0f95bc4 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1731671095260/Put/seqid=0 2024-11-15T11:46:27,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41851 is added to blk_1073741853_1029 (size=5396) 2024-11-15T11:46:27,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45289 is added to blk_1073741853_1029 (size=5396) 2024-11-15T11:46:27,570 INFO [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/hbase/meta/1588230740/.tmp/table/e230b77ae96341bab9bb6e68c0f95bc4 2024-11-15T11:46:27,580 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/hbase/meta/1588230740/.tmp/info/5ec33c1e291342e380f3cbcdd8bdf889 as hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/hbase/meta/1588230740/info/5ec33c1e291342e380f3cbcdd8bdf889 2024-11-15T11:46:27,590 INFO [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/hbase/meta/1588230740/info/5ec33c1e291342e380f3cbcdd8bdf889, entries=10, sequenceid=11, filesize=6.9 K 2024-11-15T11:46:27,592 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/hbase/meta/1588230740/.tmp/ns/f0bfb4cfb57b4a8496e12f33e547c860 as hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/hbase/meta/1588230740/ns/f0bfb4cfb57b4a8496e12f33e547c860 2024-11-15T11:46:27,603 INFO [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/hbase/meta/1588230740/ns/f0bfb4cfb57b4a8496e12f33e547c860, entries=2, sequenceid=11, filesize=5.0 K 2024-11-15T11:46:27,605 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/hbase/meta/1588230740/.tmp/table/e230b77ae96341bab9bb6e68c0f95bc4 as hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/hbase/meta/1588230740/table/e230b77ae96341bab9bb6e68c0f95bc4 2024-11-15T11:46:27,615 INFO [regionserver/7adf9b3d9d04:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T11:46:27,615 INFO [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/hbase/meta/1588230740/table/e230b77ae96341bab9bb6e68c0f95bc4, entries=2, sequenceid=11, filesize=5.3 K 2024-11-15T11:46:27,616 INFO [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 161ms, sequenceid=11, compaction requested=false 2024-11-15T11:46:27,617 INFO [regionserver/7adf9b3d9d04:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-15T11:46:27,617 INFO [regionserver/7adf9b3d9d04:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-15T11:46:27,622 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-15T11:46:27,623 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T11:46:27,623 INFO [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T11:46:27,624 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731671187454Running coprocessor pre-close hooks at 1731671187454Disabling compacts and flushes for region at 1731671187454Disabling writes for close at 1731671187455 (+1 ms)Obtaining lock to block concurrent updates at 1731671187455Preparing flush snapshotting stores in 1588230740 at 1731671187455Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1731671187455Flushing stores of hbase:meta,,1.1588230740 at 1731671187456 (+1 ms)Flushing 1588230740/info: creating writer at 1731671187456Flushing 1588230740/info: appending metadata at 1731671187476 (+20 ms)Flushing 1588230740/info: closing flushed file at 1731671187476Flushing 1588230740/ns: creating writer at 1731671187504 (+28 ms)Flushing 1588230740/ns: appending metadata at 1731671187521 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1731671187521Flushing 1588230740/table: creating writer at 1731671187540 (+19 ms)Flushing 1588230740/table: appending metadata at 1731671187558 (+18 ms)Flushing 1588230740/table: closing flushed file at 1731671187558Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@33aa0f56: reopening flushed file at 1731671187578 (+20 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3257e369: reopening flushed file at 1731671187591 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@56cb1e6c: reopening flushed file at 1731671187603 (+12 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 161ms, sequenceid=11, compaction requested=false at 1731671187616 (+13 ms)Writing region close event to WAL at 1731671187618 (+2 ms)Running coprocessor post-close hooks at 1731671187623 (+5 ms)Closed at 1731671187623 2024-11-15T11:46:27,624 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 
{event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-15T11:46:27,655 INFO [RS:0;7adf9b3d9d04:40707 {}] regionserver.HRegionServer(976): stopping server 7adf9b3d9d04,40707,1731671091491; all regions closed. 2024-11-15T11:46:27,657 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:27,657 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:27,657 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:27,657 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:27,657 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:27,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41851 is added to blk_1073741834_1010 (size=3066) 2024-11-15T11:46:27,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45289 is added to blk_1073741834_1010 (size=3066) 2024-11-15T11:46:27,664 DEBUG [RS:0;7adf9b3d9d04:40707 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/oldWALs 2024-11-15T11:46:27,664 INFO [RS:0;7adf9b3d9d04:40707 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7adf9b3d9d04%2C40707%2C1731671091491.meta:.meta(num 1731671094222) 2024-11-15T11:46:27,664 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:27,664 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:27,664 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:27,664 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:27,665 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:27,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45289 is added to blk_1073741847_1023 (size=12695) 2024-11-15T11:46:27,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41851 is added to blk_1073741847_1023 (size=12695) 2024-11-15T11:46:28,075 DEBUG [RS:0;7adf9b3d9d04:40707 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/oldWALs 2024-11-15T11:46:28,076 INFO [RS:0;7adf9b3d9d04:40707 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7adf9b3d9d04%2C40707%2C1731671091491:(num 1731671167348) 2024-11-15T11:46:28,076 DEBUG [RS:0;7adf9b3d9d04:40707 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:46:28,076 INFO [RS:0;7adf9b3d9d04:40707 {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T11:46:28,076 INFO [RS:0;7adf9b3d9d04:40707 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T11:46:28,076 INFO [RS:0;7adf9b3d9d04:40707 {}] hbase.ChoreService(370): Chore service for: regionserver/7adf9b3d9d04:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-15T11:46:28,077 INFO [RS:0;7adf9b3d9d04:40707 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T11:46:28,077 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-15T11:46:28,077 INFO [RS:0;7adf9b3d9d04:40707 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40707 2024-11-15T11:46:28,133 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40707-0x1013f9974e80001, quorum=127.0.0.1:52542, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7adf9b3d9d04,40707,1731671091491 2024-11-15T11:46:28,133 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38813-0x1013f9974e80000, quorum=127.0.0.1:52542, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T11:46:28,133 INFO [RS:0;7adf9b3d9d04:40707 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T11:46:28,141 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7adf9b3d9d04,40707,1731671091491] 2024-11-15T11:46:28,149 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/7adf9b3d9d04,40707,1731671091491 already deleted, retry=false 2024-11-15T11:46:28,149 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 7adf9b3d9d04,40707,1731671091491 expired; onlineServers=0 2024-11-15T11:46:28,150 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '7adf9b3d9d04,38813,1731671090565' ***** 2024-11-15T11:46:28,150 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-15T11:46:28,150 INFO [M:0;7adf9b3d9d04:38813 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T11:46:28,150 INFO [M:0;7adf9b3d9d04:38813 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T11:46:28,150 DEBUG [M:0;7adf9b3d9d04:38813 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-15T11:46:28,150 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-15T11:46:28,150 DEBUG [M:0;7adf9b3d9d04:38813 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-15T11:46:28,150 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster-HFileCleaner.large.0-1731671093417 {}] cleaner.HFileCleaner(306): Exit Thread[master/7adf9b3d9d04:0:becomeActiveMaster-HFileCleaner.large.0-1731671093417,5,FailOnTimeoutGroup] 2024-11-15T11:46:28,151 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster-HFileCleaner.small.0-1731671093419 {}] cleaner.HFileCleaner(306): Exit Thread[master/7adf9b3d9d04:0:becomeActiveMaster-HFileCleaner.small.0-1731671093419,5,FailOnTimeoutGroup] 2024-11-15T11:46:28,151 INFO [M:0;7adf9b3d9d04:38813 {}] hbase.ChoreService(370): Chore service for: master/7adf9b3d9d04:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-15T11:46:28,151 INFO [M:0;7adf9b3d9d04:38813 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T11:46:28,151 DEBUG [M:0;7adf9b3d9d04:38813 {}] master.HMaster(1795): Stopping service threads 2024-11-15T11:46:28,151 INFO [M:0;7adf9b3d9d04:38813 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-15T11:46:28,151 INFO [M:0;7adf9b3d9d04:38813 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T11:46:28,152 INFO [M:0;7adf9b3d9d04:38813 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-15T11:46:28,152 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-15T11:46:28,157 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38813-0x1013f9974e80000, quorum=127.0.0.1:52542, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-15T11:46:28,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38813-0x1013f9974e80000, quorum=127.0.0.1:52542, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:46:28,158 DEBUG [M:0;7adf9b3d9d04:38813 {}] zookeeper.ZKUtil(347): master:38813-0x1013f9974e80000, quorum=127.0.0.1:52542, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-15T11:46:28,158 WARN [M:0;7adf9b3d9d04:38813 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-15T11:46:28,159 INFO [M:0;7adf9b3d9d04:38813 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/.lastflushedseqids 2024-11-15T11:46:28,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41851 is added to blk_1073741854_1030 (size=130) 2024-11-15T11:46:28,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45289 is added to blk_1073741854_1030 (size=130) 2024-11-15T11:46:28,176 INFO [M:0;7adf9b3d9d04:38813 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-15T11:46:28,176 INFO [M:0;7adf9b3d9d04:38813 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-15T11:46:28,177 DEBUG [M:0;7adf9b3d9d04:38813 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T11:46:28,177 INFO [M:0;7adf9b3d9d04:38813 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:46:28,177 DEBUG [M:0;7adf9b3d9d04:38813 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:46:28,177 DEBUG [M:0;7adf9b3d9d04:38813 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T11:46:28,177 DEBUG [M:0;7adf9b3d9d04:38813 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:46:28,177 INFO [M:0;7adf9b3d9d04:38813 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.02 KB heapSize=29.20 KB 2024-11-15T11:46:28,196 DEBUG [M:0;7adf9b3d9d04:38813 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2feac845c49947529cdbfb27183acd8b is 82, key is hbase:meta,,1/info:regioninfo/1731671094349/Put/seqid=0 2024-11-15T11:46:28,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45289 is added to blk_1073741855_1031 (size=5672) 2024-11-15T11:46:28,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41851 is added to blk_1073741855_1031 (size=5672) 2024-11-15T11:46:28,205 INFO [M:0;7adf9b3d9d04:38813 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2feac845c49947529cdbfb27183acd8b 2024-11-15T11:46:28,230 DEBUG [M:0;7adf9b3d9d04:38813 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ec88d9f189854ba7a7aff883770f6fed is 766, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731671095270/Put/seqid=0 2024-11-15T11:46:28,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41851 is added to blk_1073741856_1032 (size=6247) 2024-11-15T11:46:28,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45289 is added to blk_1073741856_1032 (size=6247) 2024-11-15T11:46:28,237 INFO [M:0;7adf9b3d9d04:38813 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.42 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ec88d9f189854ba7a7aff883770f6fed 2024-11-15T11:46:28,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40707-0x1013f9974e80001, quorum=127.0.0.1:52542, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T11:46:28,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40707-0x1013f9974e80001, quorum=127.0.0.1:52542, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 
2024-11-15T11:46:28,242 INFO [RS:0;7adf9b3d9d04:40707 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T11:46:28,242 INFO [RS:0;7adf9b3d9d04:40707 {}] regionserver.HRegionServer(1031): Exiting; stopping=7adf9b3d9d04,40707,1731671091491; zookeeper connection closed. 2024-11-15T11:46:28,243 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5e82b381 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5e82b381 2024-11-15T11:46:28,243 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-15T11:46:28,244 INFO [M:0;7adf9b3d9d04:38813 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for ec88d9f189854ba7a7aff883770f6fed 2024-11-15T11:46:28,261 DEBUG [M:0;7adf9b3d9d04:38813 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f94d6df9d3d547ce85ef4db14dc2a6b1 is 69, key is 7adf9b3d9d04,40707,1731671091491/rs:state/1731671093495/Put/seqid=0 2024-11-15T11:46:28,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45289 is added to blk_1073741857_1033 (size=5156) 2024-11-15T11:46:28,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41851 is added to blk_1073741857_1033 (size=5156) 2024-11-15T11:46:28,268 INFO [M:0;7adf9b3d9d04:38813 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f94d6df9d3d547ce85ef4db14dc2a6b1 2024-11-15T11:46:28,291 DEBUG [M:0;7adf9b3d9d04:38813 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1c7458d754fc43e1ba2f8eeb78b7367c is 52, key is load_balancer_on/state:d/1731671094732/Put/seqid=0 2024-11-15T11:46:28,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45289 is added to blk_1073741858_1034 (size=5056) 2024-11-15T11:46:28,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41851 is added to blk_1073741858_1034 (size=5056) 2024-11-15T11:46:28,306 INFO [M:0;7adf9b3d9d04:38813 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1c7458d754fc43e1ba2f8eeb78b7367c 2024-11-15T11:46:28,315 DEBUG [M:0;7adf9b3d9d04:38813 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2feac845c49947529cdbfb27183acd8b as hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/2feac845c49947529cdbfb27183acd8b 2024-11-15T11:46:28,325 INFO [M:0;7adf9b3d9d04:38813 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/2feac845c49947529cdbfb27183acd8b, entries=8, sequenceid=59, filesize=5.5 K 2024-11-15T11:46:28,327 DEBUG [M:0;7adf9b3d9d04:38813 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ec88d9f189854ba7a7aff883770f6fed as hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ec88d9f189854ba7a7aff883770f6fed 2024-11-15T11:46:28,334 INFO [M:0;7adf9b3d9d04:38813 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for ec88d9f189854ba7a7aff883770f6fed 2024-11-15T11:46:28,334 INFO [M:0;7adf9b3d9d04:38813 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ec88d9f189854ba7a7aff883770f6fed, entries=6, sequenceid=59, filesize=6.1 K 2024-11-15T11:46:28,336 DEBUG [M:0;7adf9b3d9d04:38813 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f94d6df9d3d547ce85ef4db14dc2a6b1 as hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f94d6df9d3d547ce85ef4db14dc2a6b1 2024-11-15T11:46:28,343 INFO [M:0;7adf9b3d9d04:38813 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f94d6df9d3d547ce85ef4db14dc2a6b1, entries=1, sequenceid=59, filesize=5.0 K 2024-11-15T11:46:28,345 DEBUG [M:0;7adf9b3d9d04:38813 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1c7458d754fc43e1ba2f8eeb78b7367c as hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/1c7458d754fc43e1ba2f8eeb78b7367c 2024-11-15T11:46:28,353 INFO [M:0;7adf9b3d9d04:38813 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/1c7458d754fc43e1ba2f8eeb78b7367c, entries=1, sequenceid=59, filesize=4.9 K 2024-11-15T11:46:28,355 INFO [M:0;7adf9b3d9d04:38813 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 177ms, sequenceid=59, compaction requested=false 2024-11-15T11:46:28,359 INFO [M:0;7adf9b3d9d04:38813 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-15T11:46:28,359 DEBUG [M:0;7adf9b3d9d04:38813 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731671188176Disabling compacts and flushes for region at 1731671188176Disabling writes for close at 1731671188177 (+1 ms)Obtaining lock to block concurrent updates at 1731671188177Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731671188177Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23576, getHeapSize=29840, getOffHeapSize=0, getCellsCount=70 at 1731671188178 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731671188179 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731671188179Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731671188196 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731671188196Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731671188212 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731671188229 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731671188229Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731671188244 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731671188260 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731671188261 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731671188274 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731671188290 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731671188290Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@20d4088f: reopening flushed file at 1731671188314 (+24 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@10784359: reopening flushed file at 1731671188325 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@49ff6deb: reopening flushed file at 1731671188334 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@19d30950: reopening flushed file at 1731671188344 (+10 ms)Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 177ms, sequenceid=59, compaction requested=false at 1731671188355 (+11 ms)Writing region close event to WAL at 1731671188359 (+4 ms)Closed at 1731671188359 2024-11-15T11:46:28,360 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:28,360 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:28,360 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:28,361 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:28,361 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:28,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45289 is added to blk_1073741830_1006 (size=27973) 2024-11-15T11:46:28,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41851 is added to blk_1073741830_1006 (size=27973) 2024-11-15T11:46:28,364 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-15T11:46:28,364 INFO [M:0;7adf9b3d9d04:38813 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-15T11:46:28,364 INFO [M:0;7adf9b3d9d04:38813 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38813 2024-11-15T11:46:28,365 INFO [M:0;7adf9b3d9d04:38813 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T11:46:28,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38813-0x1013f9974e80000, quorum=127.0.0.1:52542, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T11:46:28,499 INFO [M:0;7adf9b3d9d04:38813 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T11:46:28,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38813-0x1013f9974e80000, quorum=127.0.0.1:52542, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T11:46:28,504 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bf97579{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T11:46:28,507 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T11:46:28,507 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T11:46:28,507 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T11:46:28,507 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38013aa2-541c-af28-f77b-965bd7a3cc8a/hadoop.log.dir/,STOPPED} 2024-11-15T11:46:28,511 WARN [BP-1474210229-172.17.0.2-1731671086590 heartbeating to localhost/127.0.0.1:37621 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T11:46:28,511 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-15T11:46:28,511 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T11:46:28,511 WARN [BP-1474210229-172.17.0.2-1731671086590 heartbeating to localhost/127.0.0.1:37621 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1474210229-172.17.0.2-1731671086590 (Datanode Uuid 86cb6d34-33c4-4994-b4f5-094970d66c80) service to localhost/127.0.0.1:37621 2024-11-15T11:46:28,513 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38013aa2-541c-af28-f77b-965bd7a3cc8a/cluster_1240ca5f-19ad-af6e-66ec-b0f14fcc9bba/data/data3/current/BP-1474210229-172.17.0.2-1731671086590 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T11:46:28,513 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38013aa2-541c-af28-f77b-965bd7a3cc8a/cluster_1240ca5f-19ad-af6e-66ec-b0f14fcc9bba/data/data4/current/BP-1474210229-172.17.0.2-1731671086590 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T11:46:28,513 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T11:46:28,516 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b07d1ba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T11:46:28,516 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T11:46:28,516 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T11:46:28,516 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T11:46:28,516 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38013aa2-541c-af28-f77b-965bd7a3cc8a/hadoop.log.dir/,STOPPED} 2024-11-15T11:46:28,518 WARN [BP-1474210229-172.17.0.2-1731671086590 heartbeating to localhost/127.0.0.1:37621 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T11:46:28,518 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-15T11:46:28,518 WARN [BP-1474210229-172.17.0.2-1731671086590 heartbeating to localhost/127.0.0.1:37621 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1474210229-172.17.0.2-1731671086590 (Datanode Uuid 30a7de63-de4a-4d6e-a559-3261a411aa0c) service to localhost/127.0.0.1:37621 2024-11-15T11:46:28,518 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T11:46:28,519 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38013aa2-541c-af28-f77b-965bd7a3cc8a/cluster_1240ca5f-19ad-af6e-66ec-b0f14fcc9bba/data/data1/current/BP-1474210229-172.17.0.2-1731671086590 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T11:46:28,519 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38013aa2-541c-af28-f77b-965bd7a3cc8a/cluster_1240ca5f-19ad-af6e-66ec-b0f14fcc9bba/data/data2/current/BP-1474210229-172.17.0.2-1731671086590 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T11:46:28,520 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T11:46:28,530 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T11:46:28,530 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T11:46:28,530 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T11:46:28,531 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T11:46:28,531 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38013aa2-541c-af28-f77b-965bd7a3cc8a/hadoop.log.dir/,STOPPED} 2024-11-15T11:46:28,539 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-15T11:46:28,573 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-15T11:46:28,582 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=76 (was 12) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37621 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging 
thread: LeaseRenewer:jenkins@localhost:37621 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37621 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: master/7adf9b3d9d04:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/7adf9b3d9d04:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37621 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37621 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37621 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: regionserver/7adf9b3d9d04:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:37621 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@53abf30 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (75657370) connection to 
localhost/127.0.0.1:37621 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=405 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=244 (was 205) - SystemLoadAverage LEAK? -, ProcessCount=12 (was 11) - ProcessCount LEAK? -, AvailableMemoryMB=11653 (was 12008) 2024-11-15T11:46:28,591 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=77, OpenFileDescriptor=405, MaxFileDescriptor=1048576, SystemLoadAverage=244, ProcessCount=12, AvailableMemoryMB=11652 2024-11-15T11:46:28,592 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-15T11:46:28,592 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38013aa2-541c-af28-f77b-965bd7a3cc8a/hadoop.log.dir so I do NOT create it in target/test-data/b1d1727d-ac4a-5cdf-f78d-2df22b4e1c67 2024-11-15T11:46:28,592 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38013aa2-541c-af28-f77b-965bd7a3cc8a/hadoop.tmp.dir so I do NOT create it in target/test-data/b1d1727d-ac4a-5cdf-f78d-2df22b4e1c67 2024-11-15T11:46:28,592 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1d1727d-ac4a-5cdf-f78d-2df22b4e1c67/cluster_1ee90bcf-a8aa-0eb6-6456-c1022489b8ff, deleteOnExit=true 2024-11-15T11:46:28,592 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-15T11:46:28,592 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1d1727d-ac4a-5cdf-f78d-2df22b4e1c67/test.cache.data in system properties and HBase conf 2024-11-15T11:46:28,593 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1d1727d-ac4a-5cdf-f78d-2df22b4e1c67/hadoop.tmp.dir in system properties and HBase conf 2024-11-15T11:46:28,593 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1d1727d-ac4a-5cdf-f78d-2df22b4e1c67/hadoop.log.dir in system properties and HBase conf 2024-11-15T11:46:28,593 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1d1727d-ac4a-5cdf-f78d-2df22b4e1c67/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-15T11:46:28,593 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1d1727d-ac4a-5cdf-f78d-2df22b4e1c67/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-15T11:46:28,593 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-15T11:46:28,593 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-15T11:46:28,593 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1d1727d-ac4a-5cdf-f78d-2df22b4e1c67/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-15T11:46:28,593 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1d1727d-ac4a-5cdf-f78d-2df22b4e1c67/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-15T11:46:28,593 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1d1727d-ac4a-5cdf-f78d-2df22b4e1c67/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-15T11:46:28,593 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1d1727d-ac4a-5cdf-f78d-2df22b4e1c67/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T11:46:28,594 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1d1727d-ac4a-5cdf-f78d-2df22b4e1c67/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-15T11:46:28,594 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1d1727d-ac4a-5cdf-f78d-2df22b4e1c67/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-15T11:46:28,594 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1d1727d-ac4a-5cdf-f78d-2df22b4e1c67/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T11:46:28,594 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1d1727d-ac4a-5cdf-f78d-2df22b4e1c67/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T11:46:28,594 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1d1727d-ac4a-5cdf-f78d-2df22b4e1c67/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-15T11:46:28,594 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1d1727d-ac4a-5cdf-f78d-2df22b4e1c67/nfs.dump.dir in system properties and HBase conf 2024-11-15T11:46:28,594 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1d1727d-ac4a-5cdf-f78d-2df22b4e1c67/java.io.tmpdir in system properties and HBase conf 2024-11-15T11:46:28,594 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1d1727d-ac4a-5cdf-f78d-2df22b4e1c67/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T11:46:28,594 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1d1727d-ac4a-5cdf-f78d-2df22b4e1c67/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-15T11:46:28,594 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1d1727d-ac4a-5cdf-f78d-2df22b4e1c67/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-15T11:46:28,608 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-15T11:46:28,870 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T11:46:28,877 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T11:46:28,879 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T11:46:28,879 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T11:46:28,879 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T11:46:28,880 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T11:46:28,881 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@54b8bf96{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1d1727d-ac4a-5cdf-f78d-2df22b4e1c67/hadoop.log.dir/,AVAILABLE} 2024-11-15T11:46:28,881 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@737d6115{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T11:46:28,996 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@55cb1221{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1d1727d-ac4a-5cdf-f78d-2df22b4e1c67/java.io.tmpdir/jetty-localhost-35771-hadoop-hdfs-3_4_1-tests_jar-_-any-18049857040732109573/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T11:46:28,997 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@542ee468{HTTP/1.1, (http/1.1)}{localhost:35771} 2024-11-15T11:46:28,997 INFO [Time-limited test {}] server.Server(415): Started @104751ms 2024-11-15T11:46:29,012 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-15T11:46:29,234 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T11:46:29,239 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T11:46:29,240 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T11:46:29,240 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T11:46:29,240 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T11:46:29,240 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@61783b0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1d1727d-ac4a-5cdf-f78d-2df22b4e1c67/hadoop.log.dir/,AVAILABLE} 2024-11-15T11:46:29,241 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7b58749b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T11:46:29,349 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4595827f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1d1727d-ac4a-5cdf-f78d-2df22b4e1c67/java.io.tmpdir/jetty-localhost-41573-hadoop-hdfs-3_4_1-tests_jar-_-any-6747503293578723292/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T11:46:29,349 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3b01355c{HTTP/1.1, (http/1.1)}{localhost:41573} 2024-11-15T11:46:29,349 INFO [Time-limited test {}] server.Server(415): Started @105103ms 2024-11-15T11:46:29,351 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T11:46:29,397 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T11:46:29,402 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T11:46:29,403 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T11:46:29,403 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T11:46:29,403 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T11:46:29,404 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6082dc4f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1d1727d-ac4a-5cdf-f78d-2df22b4e1c67/hadoop.log.dir/,AVAILABLE} 2024-11-15T11:46:29,404 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6a742c1f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T11:46:29,521 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@da5059a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1d1727d-ac4a-5cdf-f78d-2df22b4e1c67/java.io.tmpdir/jetty-localhost-45453-hadoop-hdfs-3_4_1-tests_jar-_-any-10929694389935035461/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T11:46:29,521 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2220be00{HTTP/1.1, (http/1.1)}{localhost:45453} 2024-11-15T11:46:29,521 INFO [Time-limited test {}] server.Server(415): Started @105275ms 2024-11-15T11:46:29,523 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T11:46:30,069 WARN [Thread-453 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1d1727d-ac4a-5cdf-f78d-2df22b4e1c67/cluster_1ee90bcf-a8aa-0eb6-6456-c1022489b8ff/data/data1/current/BP-1924943516-172.17.0.2-1731671188622/current, will proceed with Du for space computation calculation, 2024-11-15T11:46:30,070 WARN [Thread-454 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1d1727d-ac4a-5cdf-f78d-2df22b4e1c67/cluster_1ee90bcf-a8aa-0eb6-6456-c1022489b8ff/data/data2/current/BP-1924943516-172.17.0.2-1731671188622/current, will proceed with Du for space computation calculation, 2024-11-15T11:46:30,097 WARN [Thread-418 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-15T11:46:30,100 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc7944e5961447feb with lease ID 0x55a307d88283d974: Processing first storage report for DS-4e694b23-d24c-44e4-9af9-8e57cc4db68c from datanode DatanodeRegistration(127.0.0.1:46007, datanodeUuid=2012846e-5a0d-4192-8b4a-dba456d7990b, infoPort=46269, infoSecurePort=0, ipcPort=44165, storageInfo=lv=-57;cid=testClusterID;nsid=756667262;c=1731671188622) 2024-11-15T11:46:30,100 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc7944e5961447feb with lease ID 0x55a307d88283d974: from storage DS-4e694b23-d24c-44e4-9af9-8e57cc4db68c node DatanodeRegistration(127.0.0.1:46007, datanodeUuid=2012846e-5a0d-4192-8b4a-dba456d7990b, infoPort=46269, infoSecurePort=0, ipcPort=44165, storageInfo=lv=-57;cid=testClusterID;nsid=756667262;c=1731671188622), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T11:46:30,100 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc7944e5961447feb with lease ID 0x55a307d88283d974: Processing first storage report for DS-e105bff8-8996-4db6-bf4b-0989f29e861a from datanode DatanodeRegistration(127.0.0.1:46007, datanodeUuid=2012846e-5a0d-4192-8b4a-dba456d7990b, infoPort=46269, infoSecurePort=0, ipcPort=44165, storageInfo=lv=-57;cid=testClusterID;nsid=756667262;c=1731671188622) 2024-11-15T11:46:30,100 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc7944e5961447feb with lease ID 0x55a307d88283d974: from storage DS-e105bff8-8996-4db6-bf4b-0989f29e861a node DatanodeRegistration(127.0.0.1:46007, datanodeUuid=2012846e-5a0d-4192-8b4a-dba456d7990b, infoPort=46269, infoSecurePort=0, ipcPort=44165, storageInfo=lv=-57;cid=testClusterID;nsid=756667262;c=1731671188622), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T11:46:30,276 WARN [Thread-466 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1d1727d-ac4a-5cdf-f78d-2df22b4e1c67/cluster_1ee90bcf-a8aa-0eb6-6456-c1022489b8ff/data/data4/current/BP-1924943516-172.17.0.2-1731671188622/current, will proceed with Du for space computation calculation, 2024-11-15T11:46:30,276 WARN [Thread-465 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1d1727d-ac4a-5cdf-f78d-2df22b4e1c67/cluster_1ee90bcf-a8aa-0eb6-6456-c1022489b8ff/data/data3/current/BP-1924943516-172.17.0.2-1731671188622/current, will proceed with Du for space computation calculation, 2024-11-15T11:46:30,301 WARN [Thread-441 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-15T11:46:30,304 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2fe9315784488966 with lease ID 0x55a307d88283d975: Processing first storage report for DS-0aad8163-98db-478d-a88e-f6e375439586 from datanode DatanodeRegistration(127.0.0.1:42323, datanodeUuid=4825ed79-73f8-4a13-b23d-596d79a815b8, infoPort=38291, infoSecurePort=0, ipcPort=46017, storageInfo=lv=-57;cid=testClusterID;nsid=756667262;c=1731671188622) 2024-11-15T11:46:30,304 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2fe9315784488966 with lease ID 0x55a307d88283d975: from storage DS-0aad8163-98db-478d-a88e-f6e375439586 node DatanodeRegistration(127.0.0.1:42323, datanodeUuid=4825ed79-73f8-4a13-b23d-596d79a815b8, infoPort=38291, infoSecurePort=0, ipcPort=46017, storageInfo=lv=-57;cid=testClusterID;nsid=756667262;c=1731671188622), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T11:46:30,304 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2fe9315784488966 with lease ID 0x55a307d88283d975: Processing first storage report for DS-52c1c288-81b3-41ba-9b25-06934d8143a6 from datanode DatanodeRegistration(127.0.0.1:42323, datanodeUuid=4825ed79-73f8-4a13-b23d-596d79a815b8, infoPort=38291, infoSecurePort=0, ipcPort=46017, storageInfo=lv=-57;cid=testClusterID;nsid=756667262;c=1731671188622) 2024-11-15T11:46:30,304 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2fe9315784488966 with lease ID 0x55a307d88283d975: from storage DS-52c1c288-81b3-41ba-9b25-06934d8143a6 node DatanodeRegistration(127.0.0.1:42323, datanodeUuid=4825ed79-73f8-4a13-b23d-596d79a815b8, infoPort=38291, infoSecurePort=0, ipcPort=46017, storageInfo=lv=-57;cid=testClusterID;nsid=756667262;c=1731671188622), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T11:46:30,361 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1d1727d-ac4a-5cdf-f78d-2df22b4e1c67 2024-11-15T11:46:30,365 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1d1727d-ac4a-5cdf-f78d-2df22b4e1c67/cluster_1ee90bcf-a8aa-0eb6-6456-c1022489b8ff/zookeeper_0, clientPort=57914, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1d1727d-ac4a-5cdf-f78d-2df22b4e1c67/cluster_1ee90bcf-a8aa-0eb6-6456-c1022489b8ff/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1d1727d-ac4a-5cdf-f78d-2df22b4e1c67/cluster_1ee90bcf-a8aa-0eb6-6456-c1022489b8ff/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-15T11:46:30,366 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=57914 2024-11-15T11:46:30,366 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:46:30,369 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:46:30,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42323 is added to blk_1073741825_1001 (size=7) 2024-11-15T11:46:30,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46007 is added to blk_1073741825_1001 (size=7) 2024-11-15T11:46:30,387 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247 with version=8 2024-11-15T11:46:30,387 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/hbase-staging 2024-11-15T11:46:30,391 INFO [Time-limited test {}] client.ConnectionUtils(128): master/7adf9b3d9d04:0 server-side Connection retries=45 2024-11-15T11:46:30,391 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T11:46:30,391 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T11:46:30,391 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T11:46:30,391 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T11:46:30,391 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T11:46:30,391 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-15T11:46:30,392 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T11:46:30,393 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34655 2024-11-15T11:46:30,395 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34655 connecting to ZooKeeper ensemble=127.0.0.1:57914 2024-11-15T11:46:30,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:346550x0, quorum=127.0.0.1:57914, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T11:46:30,459 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34655-0x1013f9afe210000 connected 2024-11-15T11:46:30,532 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:46:30,533 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:46:30,536 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34655-0x1013f9afe210000, quorum=127.0.0.1:57914, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T11:46:30,536 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247, hbase.cluster.distributed=false 2024-11-15T11:46:30,538 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34655-0x1013f9afe210000, quorum=127.0.0.1:57914, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T11:46:30,538 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34655 2024-11-15T11:46:30,539 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34655 2024-11-15T11:46:30,540 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34655 2024-11-15T11:46:30,541 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34655 2024-11-15T11:46:30,541 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34655 2024-11-15T11:46:30,555 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7adf9b3d9d04:0 server-side Connection retries=45 2024-11-15T11:46:30,555 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T11:46:30,556 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T11:46:30,556 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T11:46:30,556 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T11:46:30,556 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T11:46:30,556 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-15T11:46:30,556 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T11:46:30,557 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37825 2024-11-15T11:46:30,559 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37825 connecting to ZooKeeper ensemble=127.0.0.1:57914 2024-11-15T11:46:30,560 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:46:30,563 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:46:30,574 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:378250x0, quorum=127.0.0.1:57914, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T11:46:30,574 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:378250x0, quorum=127.0.0.1:57914, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T11:46:30,574 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37825-0x1013f9afe210001 connected 2024-11-15T11:46:30,575 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-15T11:46:30,576 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-15T11:46:30,577 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37825-0x1013f9afe210001, quorum=127.0.0.1:57914, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-15T11:46:30,578 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37825-0x1013f9afe210001, quorum=127.0.0.1:57914, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T11:46:30,579 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37825 2024-11-15T11:46:30,579 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37825 2024-11-15T11:46:30,579 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37825 2024-11-15T11:46:30,580 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37825 2024-11-15T11:46:30,580 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37825 2024-11-15T11:46:30,592 DEBUG [M:0;7adf9b3d9d04:34655 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7adf9b3d9d04:34655 2024-11-15T11:46:30,593 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/7adf9b3d9d04,34655,1731671190390 2024-11-15T11:46:30,598 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37825-0x1013f9afe210001, quorum=127.0.0.1:57914, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T11:46:30,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34655-0x1013f9afe210000, quorum=127.0.0.1:57914, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T11:46:30,599 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34655-0x1013f9afe210000, quorum=127.0.0.1:57914, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/7adf9b3d9d04,34655,1731671190390 2024-11-15T11:46:30,607 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34655-0x1013f9afe210000, quorum=127.0.0.1:57914, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:46:30,607 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37825-0x1013f9afe210001, quorum=127.0.0.1:57914, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-15T11:46:30,607 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37825-0x1013f9afe210001, quorum=127.0.0.1:57914, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:46:30,608 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34655-0x1013f9afe210000, quorum=127.0.0.1:57914, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-15T11:46:30,608 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7adf9b3d9d04,34655,1731671190390 from backup master directory 2024-11-15T11:46:30,615 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34655-0x1013f9afe210000, quorum=127.0.0.1:57914, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/7adf9b3d9d04,34655,1731671190390 2024-11-15T11:46:30,615 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37825-0x1013f9afe210001, quorum=127.0.0.1:57914, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T11:46:30,615 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34655-0x1013f9afe210000, quorum=127.0.0.1:57914, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T11:46:30,615 WARN [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
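The backup-master and active-master bookkeeping logged above is all done through ZooKeeper znodes and watchers: the master registers an ephemeral znode under /hbase/backup-masters, watches /hbase/master, and deletes its backup entry once it becomes active. As a rough, hedged sketch of that underlying pattern (using the plain Apache ZooKeeper client, not HBase's own ActiveMasterManager/ZKUtil code), with the quorum address and znode paths copied from the log and everything else assumed:

    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class EphemeralRegistrationSketch {
        public static void main(String[] args) throws Exception {
            // Quorum address as it appears in the log above.
            ZooKeeper zk = new ZooKeeper("127.0.0.1:57914", 30000,
                e -> System.out.println("ZK event: " + e));

            // Register under /hbase/backup-masters with an EPHEMERAL node so the
            // entry disappears automatically if this process or its session dies.
            // (Assumes the parent znodes already exist, as they do in the log.)
            String self = "/hbase/backup-masters/7adf9b3d9d04,34655,1731671190390";
            zk.create(self, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

            // Watch the active-master znode; NodeCreated/NodeDeleted events tell
            // backup masters when to attempt a takeover.
            zk.exists("/hbase/master", event ->
                System.out.println("master znode changed: " + event.getType()));

            Thread.sleep(5_000);
            zk.close();
        }
    }

The ephemeral node is the key design point: no explicit cleanup is needed after a crash, which is why the HBASE_ZNODE_FILE warning above affects only recovery time (MTTR), not correctness.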
2024-11-15T11:46:30,615 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7adf9b3d9d04,34655,1731671190390 2024-11-15T11:46:30,621 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/hbase.id] with ID: 0f3d2ee4-b68d-4924-a245-5e8abc8c7e13 2024-11-15T11:46:30,621 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/.tmp/hbase.id 2024-11-15T11:46:30,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46007 is added to blk_1073741826_1002 (size=42) 2024-11-15T11:46:30,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42323 is added to blk_1073741826_1002 (size=42) 2024-11-15T11:46:30,627 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/.tmp/hbase.id]:[hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/hbase.id] 2024-11-15T11:46:30,641 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:46:30,641 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-15T11:46:30,643 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
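The cluster ID handling above (write to .tmp/hbase.id, then move the file into place) is the standard write-to-temporary-then-rename pattern on HDFS, so readers either see the complete file or none at all. Below is a minimal sketch with the stock Hadoop FileSystem API, assuming the NameNode URI, paths and ID shown in the log; the real FSUtils code differs in its details:

    import java.io.IOException;
    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdFileSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // NameNode address as it appears in the log above.
            FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:39177"), conf);

            Path tmp = new Path("/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/.tmp/hbase.id");
            Path dst = new Path("/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/hbase.id");

            // Write the ID to a temporary location first...
            try (FSDataOutputStream out = fs.create(tmp, true)) {
                out.writeUTF("0f3d2ee4-b68d-4924-a245-5e8abc8c7e13");
            }
            // ...then rename it, so the final path appears in one step.
            if (!fs.rename(tmp, dst)) {
                throw new IOException("rename failed: " + tmp + " -> " + dst);
            }
            fs.close();
        }
    }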
2024-11-15T11:46:30,655 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37825-0x1013f9afe210001, quorum=127.0.0.1:57914, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:46:30,655 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34655-0x1013f9afe210000, quorum=127.0.0.1:57914, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:46:30,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42323 is added to blk_1073741827_1003 (size=196) 2024-11-15T11:46:30,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46007 is added to blk_1073741827_1003 (size=196) 2024-11-15T11:46:30,663 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T11:46:30,664 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-15T11:46:30,664 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T11:46:30,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46007 is added to blk_1073741828_1004 (size=1189) 2024-11-15T11:46:30,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42323 is added to blk_1073741828_1004 (size=1189) 2024-11-15T11:46:31,017 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T11:46:31,017 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-15T11:46:31,018 DEBUG 
[HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-15T11:46:31,018 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-15T11:46:31,075 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/MasterData/data/master/store 2024-11-15T11:46:31,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42323 is added to blk_1073741829_1005 (size=34) 2024-11-15T11:46:31,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46007 is added to blk_1073741829_1005 (size=34) 2024-11-15T11:46:31,085 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:46:31,085 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T11:46:31,085 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:46:31,085 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:46:31,085 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-11-15T11:46:31,086 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:46:31,086 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:46:31,086 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731671191085Disabling compacts and flushes for region at 1731671191085Disabling writes for close at 1731671191085Writing region close event to WAL at 1731671191086 (+1 ms)Closed at 1731671191086 2024-11-15T11:46:31,087 WARN [master/7adf9b3d9d04:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/MasterData/data/master/store/.initializing 2024-11-15T11:46:31,087 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/MasterData/WALs/7adf9b3d9d04,34655,1731671190390 2024-11-15T11:46:31,092 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7adf9b3d9d04%2C34655%2C1731671190390, suffix=, logDir=hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/MasterData/WALs/7adf9b3d9d04,34655,1731671190390, archiveDir=hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/MasterData/oldWALs, maxLogs=10 2024-11-15T11:46:31,093 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7adf9b3d9d04%2C34655%2C1731671190390.1731671191093 2024-11-15T11:46:31,104 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/MasterData/WALs/7adf9b3d9d04,34655,1731671190390/7adf9b3d9d04%2C34655%2C1731671190390.1731671191093 2024-11-15T11:46:31,111 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38291:38291),(127.0.0.1/127.0.0.1:46269:46269)] 2024-11-15T11:46:31,115 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-15T11:46:31,116 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:46:31,116 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:46:31,116 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:46:31,118 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:46:31,120 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-15T11:46:31,121 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:46:31,121 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:46:31,122 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:46:31,124 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-15T11:46:31,124 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:46:31,125 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:46:31,125 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:46:31,128 INFO 
[StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-15T11:46:31,128 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:46:31,129 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:46:31,129 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:46:31,132 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-15T11:46:31,132 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:46:31,132 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:46:31,133 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:46:31,134 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:46:31,135 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 
recovered edits file(s) under hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:46:31,137 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:46:31,137 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:46:31,137 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-15T11:46:31,139 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:46:31,142 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T11:46:31,143 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=846284, jitterRate=0.07610686123371124}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-15T11:46:31,144 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731671191116Initializing all the Stores at 1731671191118 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731671191118Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731671191118Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731671191118Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731671191118Cleaning up temporary data from old regions at 1731671191137 (+19 ms)Region opened successfully at 1731671191144 (+7 ms) 2024-11-15T11:46:31,145 INFO 
[master/7adf9b3d9d04:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-15T11:46:31,151 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69e60d64, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7adf9b3d9d04/172.17.0.2:0 2024-11-15T11:46:31,152 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-15T11:46:31,152 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-15T11:46:31,153 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-15T11:46:31,153 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-15T11:46:31,154 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-15T11:46:31,155 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-15T11:46:31,155 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-15T11:46:31,158 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
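The 'master:store' descriptor dumped above (info/proc/rs/state families with different VERSIONS, BLOOMFILTER, BLOCKSIZE and encoding settings) is the kind of structure assembled with the HBase client descriptor builders. As an illustration of that API, here is a hedged sketch that rebuilds the 'info' family exactly as the log describes it and leaves the other families at their defaults; this is not the code HBase itself runs for the master local region:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
        public static void main(String[] args) {
            // 'info' family as logged: 3 versions, in-memory, ROWCOL bloom filter,
            // ROW_INDEX_V1 data block encoding, 8 KB blocks.
            ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setBloomFilterType(BloomType.ROWCOL)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBlocksize(8192)
                .build();

            // 'proc', 'rs' and 'state' use the defaults shown in the log (1 version,
            // ROW bloom filter, 64 KB blocks); 'of' builds a family with defaults.
            TableDescriptor store = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("master", "store"))
                .setColumnFamily(info)
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
                .build();

            System.out.println(store);
        }
    }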
2024-11-15T11:46:31,159 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34655-0x1013f9afe210000, quorum=127.0.0.1:57914, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-15T11:46:31,207 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-15T11:46:31,207 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-15T11:46:31,208 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34655-0x1013f9afe210000, quorum=127.0.0.1:57914, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-15T11:46:31,215 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-15T11:46:31,215 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-15T11:46:31,216 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34655-0x1013f9afe210000, quorum=127.0.0.1:57914, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-15T11:46:31,223 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-15T11:46:31,224 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34655-0x1013f9afe210000, quorum=127.0.0.1:57914, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-15T11:46:31,231 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-15T11:46:31,234 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34655-0x1013f9afe210000, quorum=127.0.0.1:57914, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-15T11:46:31,240 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-15T11:46:31,248 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37825-0x1013f9afe210001, quorum=127.0.0.1:57914, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T11:46:31,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34655-0x1013f9afe210000, quorum=127.0.0.1:57914, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T11:46:31,248 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37825-0x1013f9afe210001, quorum=127.0.0.1:57914, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:46:31,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34655-0x1013f9afe210000, quorum=127.0.0.1:57914, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-15T11:46:31,249 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=7adf9b3d9d04,34655,1731671190390, sessionid=0x1013f9afe210000, setting cluster-up flag (Was=false) 2024-11-15T11:46:31,265 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37825-0x1013f9afe210001, quorum=127.0.0.1:57914, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:46:31,265 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34655-0x1013f9afe210000, quorum=127.0.0.1:57914, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:46:31,290 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-15T11:46:31,291 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7adf9b3d9d04,34655,1731671190390 2024-11-15T11:46:31,307 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34655-0x1013f9afe210000, quorum=127.0.0.1:57914, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:46:31,307 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37825-0x1013f9afe210001, quorum=127.0.0.1:57914, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:46:31,332 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-15T11:46:31,333 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7adf9b3d9d04,34655,1731671190390 2024-11-15T11:46:31,334 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-15T11:46:31,336 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-15T11:46:31,336 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-15T11:46:31,336 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
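The StochasticLoadBalancer line above reports tuning values (maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, ...) that are read from the HBase Configuration at master startup. A small sketch of reading such knobs follows; the property names are the ones commonly documented for the stochastic balancer and should be treated as assumptions here, with the defaults set to match the values in the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerConfigSketch {
        public static void main(String[] args) {
            // Loads hbase-default.xml / hbase-site.xml from the classpath.
            Configuration conf = HBaseConfiguration.create();

            // Property names assumed; defaults mirror the logged values.
            long maxSteps = conf.getLong("hbase.master.balancer.stochastic.maxSteps", 1_000_000L);
            int stepsPerRegion = conf.getInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
            long maxRunningTime = conf.getLong("hbase.master.balancer.stochastic.maxRunningTime", 30_000L);
            boolean runMaxSteps = conf.getBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);

            System.out.printf("maxSteps=%d, stepsPerRegion=%d, maxRunningTime=%d, runMaxSteps=%b%n",
                maxSteps, stepsPerRegion, maxRunningTime, runMaxSteps);
        }
    }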
2024-11-15T11:46:31,336 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7adf9b3d9d04,34655,1731671190390 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-15T11:46:31,338 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7adf9b3d9d04:0, corePoolSize=5, maxPoolSize=5 2024-11-15T11:46:31,338 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7adf9b3d9d04:0, corePoolSize=5, maxPoolSize=5 2024-11-15T11:46:31,338 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7adf9b3d9d04:0, corePoolSize=5, maxPoolSize=5 2024-11-15T11:46:31,338 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7adf9b3d9d04:0, corePoolSize=5, maxPoolSize=5 2024-11-15T11:46:31,338 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7adf9b3d9d04:0, corePoolSize=10, maxPoolSize=10 2024-11-15T11:46:31,338 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:46:31,338 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/7adf9b3d9d04:0, corePoolSize=2, maxPoolSize=2 2024-11-15T11:46:31,338 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:46:31,339 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731671221339 2024-11-15T11:46:31,340 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-15T11:46:31,340 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-15T11:46:31,340 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-15T11:46:31,340 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-15T11:46:31,340 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-15T11:46:31,340 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-15T11:46:31,340 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-15T11:46:31,340 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T11:46:31,340 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-15T11:46:31,340 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-15T11:46:31,341 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-15T11:46:31,341 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-15T11:46:31,341 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-15T11:46:31,341 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-15T11:46:31,341 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7adf9b3d9d04:0:becomeActiveMaster-HFileCleaner.large.0-1731671191341,5,FailOnTimeoutGroup] 2024-11-15T11:46:31,341 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/7adf9b3d9d04:0:becomeActiveMaster-HFileCleaner.small.0-1731671191341,5,FailOnTimeoutGroup] 2024-11-15T11:46:31,341 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-15T11:46:31,342 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-15T11:46:31,342 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-15T11:46:31,342 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:46:31,342 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
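The ChoreService entries above register periodic maintenance tasks (LogsCleaner and HFileCleaner every 600000 ms, SnapshotCleaner every 1800000 ms, ReplicationBarrierCleaner every 43200000 ms). The scheduling pattern itself is ordinary fixed-rate execution; the sketch below uses java.util.concurrent rather than HBase's internal ChoreService API, and the task body is a placeholder.

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreSketch {
      public static void main(String[] args) {
        ScheduledExecutorService pool = Executors.newSingleThreadScheduledExecutor();
        // Same period as the LogsCleaner chore in the log: 600000 ms.
        pool.scheduleAtFixedRate(
            () -> System.out.println("cleaning old WALs (placeholder task)"),
            0, 600_000, TimeUnit.MILLISECONDS);
        // Non-daemon pool keeps the JVM alive; shut it down when the service stops.
      }
    }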
2024-11-15T11:46:31,342 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-15T11:46:31,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46007 is added to blk_1073741831_1007 (size=1321) 2024-11-15T11:46:31,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42323 is added to blk_1073741831_1007 (size=1321) 2024-11-15T11:46:31,350 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-15T11:46:31,350 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', 
BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247 2024-11-15T11:46:31,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46007 is added to blk_1073741832_1008 (size=32) 2024-11-15T11:46:31,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42323 is added to blk_1073741832_1008 (size=32) 2024-11-15T11:46:31,359 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:46:31,362 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T11:46:31,364 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T11:46:31,364 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:46:31,365 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:46:31,365 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T11:46:31,366 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T11:46:31,367 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:46:31,367 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:46:31,367 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T11:46:31,369 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T11:46:31,369 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:46:31,369 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:46:31,369 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T11:46:31,371 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T11:46:31,371 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:46:31,371 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): 
Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:46:31,371 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T11:46:31,372 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/data/hbase/meta/1588230740 2024-11-15T11:46:31,373 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/data/hbase/meta/1588230740 2024-11-15T11:46:31,374 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T11:46:31,374 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T11:46:31,375 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-15T11:46:31,376 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T11:46:31,378 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T11:46:31,379 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=862776, jitterRate=0.09707659482955933}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T11:46:31,380 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731671191359Initializing all the Stores at 1731671191361 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731671191361Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731671191362 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731671191362Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', 
BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731671191362Cleaning up temporary data from old regions at 1731671191374 (+12 ms)Region opened successfully at 1731671191380 (+6 ms) 2024-11-15T11:46:31,380 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T11:46:31,380 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T11:46:31,380 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T11:46:31,380 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T11:46:31,380 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T11:46:31,381 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T11:46:31,381 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731671191380Disabling compacts and flushes for region at 1731671191380Disabling writes for close at 1731671191380Writing region close event to WAL at 1731671191380Closed at 1731671191381 (+1 ms) 2024-11-15T11:46:31,382 INFO [RS:0;7adf9b3d9d04:37825 {}] regionserver.HRegionServer(746): ClusterId : 0f3d2ee4-b68d-4924-a245-5e8abc8c7e13 2024-11-15T11:46:31,382 DEBUG [RS:0;7adf9b3d9d04:37825 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-15T11:46:31,382 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T11:46:31,382 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-15T11:46:31,383 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-15T11:46:31,384 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T11:46:31,385 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-15T11:46:31,400 DEBUG [RS:0;7adf9b3d9d04:37825 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-15T11:46:31,400 DEBUG [RS:0;7adf9b3d9d04:37825 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-15T11:46:31,408 DEBUG [RS:0;7adf9b3d9d04:37825 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-15T11:46:31,408 DEBUG [RS:0;7adf9b3d9d04:37825 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@388cfa43, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7adf9b3d9d04/172.17.0.2:0 2024-11-15T11:46:31,420 DEBUG [RS:0;7adf9b3d9d04:37825 {}] 
regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;7adf9b3d9d04:37825 2024-11-15T11:46:31,420 INFO [RS:0;7adf9b3d9d04:37825 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-15T11:46:31,420 INFO [RS:0;7adf9b3d9d04:37825 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-15T11:46:31,420 DEBUG [RS:0;7adf9b3d9d04:37825 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-15T11:46:31,421 INFO [RS:0;7adf9b3d9d04:37825 {}] regionserver.HRegionServer(2659): reportForDuty to master=7adf9b3d9d04,34655,1731671190390 with port=37825, startcode=1731671190555 2024-11-15T11:46:31,421 DEBUG [RS:0;7adf9b3d9d04:37825 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-15T11:46:31,424 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59851, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-15T11:46:31,424 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34655 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 7adf9b3d9d04,37825,1731671190555 2024-11-15T11:46:31,425 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34655 {}] master.ServerManager(517): Registering regionserver=7adf9b3d9d04,37825,1731671190555 2024-11-15T11:46:31,427 DEBUG [RS:0;7adf9b3d9d04:37825 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247 2024-11-15T11:46:31,427 DEBUG [RS:0;7adf9b3d9d04:37825 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39177 2024-11-15T11:46:31,427 DEBUG [RS:0;7adf9b3d9d04:37825 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-15T11:46:31,438 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34655-0x1013f9afe210000, quorum=127.0.0.1:57914, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T11:46:31,439 DEBUG [RS:0;7adf9b3d9d04:37825 {}] zookeeper.ZKUtil(111): regionserver:37825-0x1013f9afe210001, quorum=127.0.0.1:57914, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7adf9b3d9d04,37825,1731671190555 2024-11-15T11:46:31,439 WARN [RS:0;7adf9b3d9d04:37825 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
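The FSTableDescriptors entry at 11:46:31,342 above prints the full hbase:meta descriptor: families info, ns, rep_barrier and table, each with ROW_INDEX_V1 data block encoding, ROWCOL bloom filters, IN_MEMORY => 'true' and an 8 KB (64 KB for rep_barrier) block size. A user-table descriptor with the same per-family settings can be built through the public client API; in this sketch the table name 'demo' is a placeholder and only an info-style family is shown.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorSketch {
      public static void main(String[] args) {
        // Mirrors the 'info' family settings printed for hbase:meta in the log.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBlocksize(8192)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .build();

        TableDescriptor table = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo"))   // placeholder table name
            .setColumnFamily(info)
            .build();

        System.out.println(table);
      }
    }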
2024-11-15T11:46:31,439 INFO [RS:0;7adf9b3d9d04:37825 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T11:46:31,439 DEBUG [RS:0;7adf9b3d9d04:37825 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/WALs/7adf9b3d9d04,37825,1731671190555 2024-11-15T11:46:31,439 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7adf9b3d9d04,37825,1731671190555] 2024-11-15T11:46:31,443 INFO [RS:0;7adf9b3d9d04:37825 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-15T11:46:31,446 INFO [RS:0;7adf9b3d9d04:37825 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-15T11:46:31,447 INFO [RS:0;7adf9b3d9d04:37825 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-15T11:46:31,447 INFO [RS:0;7adf9b3d9d04:37825 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T11:46:31,447 INFO [RS:0;7adf9b3d9d04:37825 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-15T11:46:31,448 INFO [RS:0;7adf9b3d9d04:37825 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-15T11:46:31,448 INFO [RS:0;7adf9b3d9d04:37825 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-15T11:46:31,449 DEBUG [RS:0;7adf9b3d9d04:37825 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:46:31,449 DEBUG [RS:0;7adf9b3d9d04:37825 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:46:31,449 DEBUG [RS:0;7adf9b3d9d04:37825 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:46:31,449 DEBUG [RS:0;7adf9b3d9d04:37825 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:46:31,449 DEBUG [RS:0;7adf9b3d9d04:37825 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:46:31,449 DEBUG [RS:0;7adf9b3d9d04:37825 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7adf9b3d9d04:0, corePoolSize=2, maxPoolSize=2 2024-11-15T11:46:31,449 DEBUG [RS:0;7adf9b3d9d04:37825 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:46:31,449 DEBUG [RS:0;7adf9b3d9d04:37825 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:46:31,449 DEBUG [RS:0;7adf9b3d9d04:37825 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7adf9b3d9d04:0, corePoolSize=1, 
maxPoolSize=1 2024-11-15T11:46:31,449 DEBUG [RS:0;7adf9b3d9d04:37825 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:46:31,449 DEBUG [RS:0;7adf9b3d9d04:37825 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:46:31,449 DEBUG [RS:0;7adf9b3d9d04:37825 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:46:31,449 DEBUG [RS:0;7adf9b3d9d04:37825 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7adf9b3d9d04:0, corePoolSize=3, maxPoolSize=3 2024-11-15T11:46:31,449 DEBUG [RS:0;7adf9b3d9d04:37825 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7adf9b3d9d04:0, corePoolSize=3, maxPoolSize=3 2024-11-15T11:46:31,450 INFO [RS:0;7adf9b3d9d04:37825 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T11:46:31,450 INFO [RS:0;7adf9b3d9d04:37825 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T11:46:31,450 INFO [RS:0;7adf9b3d9d04:37825 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T11:46:31,450 INFO [RS:0;7adf9b3d9d04:37825 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-15T11:46:31,450 INFO [RS:0;7adf9b3d9d04:37825 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-15T11:46:31,450 INFO [RS:0;7adf9b3d9d04:37825 {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,37825,1731671190555-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T11:46:31,465 INFO [RS:0;7adf9b3d9d04:37825 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-15T11:46:31,465 INFO [RS:0;7adf9b3d9d04:37825 {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,37825,1731671190555-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T11:46:31,465 INFO [RS:0;7adf9b3d9d04:37825 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T11:46:31,465 INFO [RS:0;7adf9b3d9d04:37825 {}] regionserver.Replication(171): 7adf9b3d9d04,37825,1731671190555 started 2024-11-15T11:46:31,479 INFO [RS:0;7adf9b3d9d04:37825 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-15T11:46:31,479 INFO [RS:0;7adf9b3d9d04:37825 {}] regionserver.HRegionServer(1482): Serving as 7adf9b3d9d04,37825,1731671190555, RpcServer on 7adf9b3d9d04/172.17.0.2:37825, sessionid=0x1013f9afe210001 2024-11-15T11:46:31,480 DEBUG [RS:0;7adf9b3d9d04:37825 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-15T11:46:31,480 DEBUG [RS:0;7adf9b3d9d04:37825 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7adf9b3d9d04,37825,1731671190555 2024-11-15T11:46:31,480 DEBUG [RS:0;7adf9b3d9d04:37825 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7adf9b3d9d04,37825,1731671190555' 2024-11-15T11:46:31,480 DEBUG [RS:0;7adf9b3d9d04:37825 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-15T11:46:31,480 DEBUG [RS:0;7adf9b3d9d04:37825 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-15T11:46:31,481 DEBUG [RS:0;7adf9b3d9d04:37825 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-15T11:46:31,481 DEBUG [RS:0;7adf9b3d9d04:37825 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-15T11:46:31,481 DEBUG [RS:0;7adf9b3d9d04:37825 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7adf9b3d9d04,37825,1731671190555 2024-11-15T11:46:31,481 DEBUG [RS:0;7adf9b3d9d04:37825 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7adf9b3d9d04,37825,1731671190555' 2024-11-15T11:46:31,481 DEBUG [RS:0;7adf9b3d9d04:37825 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-15T11:46:31,481 DEBUG [RS:0;7adf9b3d9d04:37825 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-15T11:46:31,482 DEBUG [RS:0;7adf9b3d9d04:37825 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-15T11:46:31,482 INFO [RS:0;7adf9b3d9d04:37825 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-15T11:46:31,482 INFO [RS:0;7adf9b3d9d04:37825 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-15T11:46:31,536 WARN [7adf9b3d9d04:34655 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-11-15T11:46:31,585 INFO [RS:0;7adf9b3d9d04:37825 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7adf9b3d9d04%2C37825%2C1731671190555, suffix=, logDir=hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/WALs/7adf9b3d9d04,37825,1731671190555, archiveDir=hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/oldWALs, maxLogs=32 2024-11-15T11:46:31,588 INFO [RS:0;7adf9b3d9d04:37825 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7adf9b3d9d04%2C37825%2C1731671190555.1731671191588 2024-11-15T11:46:31,596 INFO [RS:0;7adf9b3d9d04:37825 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/WALs/7adf9b3d9d04,37825,1731671190555/7adf9b3d9d04%2C37825%2C1731671190555.1731671191588 2024-11-15T11:46:31,597 DEBUG [RS:0;7adf9b3d9d04:37825 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46269:46269),(127.0.0.1/127.0.0.1:38291:38291)] 2024-11-15T11:46:31,786 DEBUG [7adf9b3d9d04:34655 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-15T11:46:31,787 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7adf9b3d9d04,37825,1731671190555 2024-11-15T11:46:31,788 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7adf9b3d9d04,37825,1731671190555, state=OPENING 2024-11-15T11:46:31,798 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-15T11:46:31,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34655-0x1013f9afe210000, quorum=127.0.0.1:57914, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:46:31,807 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37825-0x1013f9afe210001, quorum=127.0.0.1:57914, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:46:31,807 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T11:46:31,807 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T11:46:31,807 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T11:46:31,807 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=7adf9b3d9d04,37825,1731671190555}] 2024-11-15T11:46:31,961 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-15T11:46:31,964 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37571, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-15T11:46:31,971 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-15T11:46:31,972 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T11:46:31,975 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7adf9b3d9d04%2C37825%2C1731671190555.meta, suffix=.meta, logDir=hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/WALs/7adf9b3d9d04,37825,1731671190555, archiveDir=hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/oldWALs, maxLogs=32 2024-11-15T11:46:31,978 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 7adf9b3d9d04%2C37825%2C1731671190555.meta.1731671191978.meta 2024-11-15T11:46:31,985 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/WALs/7adf9b3d9d04,37825,1731671190555/7adf9b3d9d04%2C37825%2C1731671190555.meta.1731671191978.meta 2024-11-15T11:46:31,991 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38291:38291),(127.0.0.1/127.0.0.1:46269:46269)] 2024-11-15T11:46:31,995 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-15T11:46:31,995 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-15T11:46:31,995 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-15T11:46:31,996 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
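The open-region handler above loads org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint "with path null and priority 536870911" because the coprocessor is declared directly in the hbase:meta table descriptor rather than shipped as a separate jar. Declaring an endpoint on a user table goes through the same descriptor mechanism; a sketch, with a placeholder table name and using the class name shown in the log:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CoprocessorSketch {
      public static void main(String[] args) throws Exception {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo"))   // placeholder table name
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info")).build())
            // Same endpoint class the log shows being loaded for hbase:meta.
            .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .build();
        System.out.println(td);
      }
    }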
2024-11-15T11:46:31,996 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-15T11:46:31,996 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:46:31,996 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-15T11:46:31,996 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-15T11:46:31,998 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T11:46:32,000 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T11:46:32,000 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:46:32,000 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:46:32,000 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T11:46:32,002 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T11:46:32,002 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:46:32,003 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:46:32,003 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T11:46:32,004 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T11:46:32,005 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:46:32,006 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:46:32,006 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T11:46:32,007 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T11:46:32,008 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:46:32,008 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
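The CompactionConfiguration lines repeated above for each column family ("files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; ...") are driven by standard configuration keys rather than anything specific to this test. The sketch below simply restates those defaults through the public Hadoop Configuration API; the keys are the documented HBase ones, the values are the same numbers the log prints.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // These keys feed the CompactionConfiguration values shown above:
        conf.setInt("hbase.hstore.compaction.min", 3);      // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);     // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        System.out.println("min files to compact = " + conf.get("hbase.hstore.compaction.min"));
      }
    }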
2024-11-15T11:46:32,009 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T11:46:32,010 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/data/hbase/meta/1588230740 2024-11-15T11:46:32,011 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/data/hbase/meta/1588230740 2024-11-15T11:46:32,013 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T11:46:32,013 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T11:46:32,014 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-15T11:46:32,016 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T11:46:32,017 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=755417, jitterRate=-0.039438605308532715}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T11:46:32,017 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-15T11:46:32,018 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731671191996Writing region info on filesystem at 1731671191996Initializing all the Stores at 1731671191998 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731671191998Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731671191998Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731671191998Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731671191998Cleaning up temporary data from old regions at 1731671192013 (+15 ms)Running coprocessor post-open hooks at 1731671192018 (+5 ms)Region opened successfully at 1731671192018 2024-11-15T11:46:32,020 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731671191961 2024-11-15T11:46:32,023 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-15T11:46:32,023 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-15T11:46:32,025 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=7adf9b3d9d04,37825,1731671190555 2024-11-15T11:46:32,026 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7adf9b3d9d04,37825,1731671190555, state=OPEN 2024-11-15T11:46:32,074 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37825-0x1013f9afe210001, quorum=127.0.0.1:57914, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T11:46:32,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34655-0x1013f9afe210000, quorum=127.0.0.1:57914, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T11:46:32,074 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=7adf9b3d9d04,37825,1731671190555 2024-11-15T11:46:32,075 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T11:46:32,075 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T11:46:32,078 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-15T11:46:32,078 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=7adf9b3d9d04,37825,1731671190555 in 268 msec 2024-11-15T11:46:32,082 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-15T11:46:32,083 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 696 msec 2024-11-15T11:46:32,084 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T11:46:32,084 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-15T11:46:32,086 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:46:32,086 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7adf9b3d9d04,37825,1731671190555, seqNum=-1] 2024-11-15T11:46:32,087 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:46:32,088 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54941, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T11:46:32,097 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 760 msec 2024-11-15T11:46:32,097 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731671192097, completionTime=-1 2024-11-15T11:46:32,097 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-15T11:46:32,097 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-15T11:46:32,100 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-15T11:46:32,100 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731671252100 2024-11-15T11:46:32,100 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731671312100 2024-11-15T11:46:32,100 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 3 msec 2024-11-15T11:46:32,101 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,34655,1731671190390-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T11:46:32,101 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,34655,1731671190390-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T11:46:32,101 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,34655,1731671190390-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T11:46:32,101 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7adf9b3d9d04:34655, period=300000, unit=MILLISECONDS is enabled. 
2024-11-15T11:46:32,101 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-15T11:46:32,101 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-15T11:46:32,104 DEBUG [master/7adf9b3d9d04:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-15T11:46:32,113 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.497sec 2024-11-15T11:46:32,113 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-15T11:46:32,113 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-15T11:46:32,113 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-15T11:46:32,113 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-15T11:46:32,113 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-15T11:46:32,113 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,34655,1731671190390-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T11:46:32,113 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,34655,1731671190390-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-15T11:46:32,117 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-15T11:46:32,118 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-15T11:46:32,118 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,34655,1731671190390-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-15T11:46:32,183 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6512930c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:46:32,183 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 7adf9b3d9d04,34655,-1 for getting cluster id 2024-11-15T11:46:32,184 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T11:46:32,186 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0f3d2ee4-b68d-4924-a245-5e8abc8c7e13' 2024-11-15T11:46:32,187 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T11:46:32,187 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0f3d2ee4-b68d-4924-a245-5e8abc8c7e13" 2024-11-15T11:46:32,188 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d9aa0e3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:46:32,188 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [7adf9b3d9d04,34655,-1] 2024-11-15T11:46:32,188 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T11:46:32,189 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:46:32,191 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53762, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T11:46:32,192 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4bd69191, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:46:32,193 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:46:32,195 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7adf9b3d9d04,37825,1731671190555, seqNum=-1] 2024-11-15T11:46:32,195 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:46:32,198 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53120, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T11:46:32,200 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=7adf9b3d9d04,34655,1731671190390 2024-11-15T11:46:32,201 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:46:32,206 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-15T11:46:32,206 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-15T11:46:32,206 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-15T11:46:32,207 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T11:46:32,207 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:46:32,207 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:46:32,207 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-15T11:46:32,207 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=604601029, stopped=false 2024-11-15T11:46:32,207 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T11:46:32,207 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=7adf9b3d9d04,34655,1731671190390 2024-11-15T11:46:32,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34655-0x1013f9afe210000, quorum=127.0.0.1:57914, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T11:46:32,230 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37825-0x1013f9afe210001, quorum=127.0.0.1:57914, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T11:46:32,230 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T11:46:32,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34655-0x1013f9afe210000, quorum=127.0.0.1:57914, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:46:32,230 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37825-0x1013f9afe210001, quorum=127.0.0.1:57914, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:46:32,231 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-15T11:46:32,231 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37825-0x1013f9afe210001, quorum=127.0.0.1:57914, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T11:46:32,231 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34655-0x1013f9afe210000, quorum=127.0.0.1:57914, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T11:46:32,231 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at 
org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T11:46:32,231 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:46:32,232 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '7adf9b3d9d04,37825,1731671190555' ***** 2024-11-15T11:46:32,232 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-15T11:46:32,232 INFO [RS:0;7adf9b3d9d04:37825 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-15T11:46:32,232 INFO [RS:0;7adf9b3d9d04:37825 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-15T11:46:32,232 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-15T11:46:32,232 INFO [RS:0;7adf9b3d9d04:37825 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-15T11:46:32,232 INFO [RS:0;7adf9b3d9d04:37825 {}] regionserver.HRegionServer(959): stopping server 7adf9b3d9d04,37825,1731671190555 2024-11-15T11:46:32,232 INFO [RS:0;7adf9b3d9d04:37825 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T11:46:32,232 INFO [RS:0;7adf9b3d9d04:37825 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;7adf9b3d9d04:37825. 2024-11-15T11:46:32,233 DEBUG [RS:0;7adf9b3d9d04:37825 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T11:46:32,233 DEBUG [RS:0;7adf9b3d9d04:37825 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:46:32,233 INFO [RS:0;7adf9b3d9d04:37825 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-15T11:46:32,233 INFO [RS:0;7adf9b3d9d04:37825 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-15T11:46:32,233 INFO [RS:0;7adf9b3d9d04:37825 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-15T11:46:32,233 INFO [RS:0;7adf9b3d9d04:37825 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-15T11:46:32,233 INFO [RS:0;7adf9b3d9d04:37825 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-15T11:46:32,233 DEBUG [RS:0;7adf9b3d9d04:37825 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-15T11:46:32,233 DEBUG [RS:0;7adf9b3d9d04:37825 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-15T11:46:32,233 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T11:46:32,233 INFO [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T11:46:32,234 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T11:46:32,234 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T11:46:32,234 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T11:46:32,234 INFO [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-15T11:46:32,255 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/data/hbase/meta/1588230740/.tmp/ns/a738278a783146138f41c48b0b16226a is 43, key is default/ns:d/1731671192089/Put/seqid=0 2024-11-15T11:46:32,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42323 is added to blk_1073741835_1011 (size=5153) 2024-11-15T11:46:32,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46007 is added to blk_1073741835_1011 (size=5153) 2024-11-15T11:46:32,266 INFO [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/data/hbase/meta/1588230740/.tmp/ns/a738278a783146138f41c48b0b16226a 2024-11-15T11:46:32,279 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/data/hbase/meta/1588230740/.tmp/ns/a738278a783146138f41c48b0b16226a as hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/data/hbase/meta/1588230740/ns/a738278a783146138f41c48b0b16226a 2024-11-15T11:46:32,292 INFO [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/data/hbase/meta/1588230740/ns/a738278a783146138f41c48b0b16226a, entries=2, sequenceid=6, filesize=5.0 K 2024-11-15T11:46:32,293 INFO [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 59ms, sequenceid=6, compaction requested=false 2024-11-15T11:46:32,294 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-15T11:46:32,301 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-15T11:46:32,302 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T11:46:32,302 INFO [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T11:46:32,302 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731671192233Running coprocessor pre-close hooks at 1731671192233Disabling compacts and flushes for region at 1731671192233Disabling writes for close at 1731671192234 (+1 ms)Obtaining lock to block concurrent updates at 1731671192234Preparing flush snapshotting stores in 1588230740 at 1731671192234Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731671192234Flushing stores of hbase:meta,,1.1588230740 at 1731671192235 (+1 ms)Flushing 1588230740/ns: creating writer at 1731671192236 (+1 ms)Flushing 1588230740/ns: appending metadata at 1731671192255 (+19 ms)Flushing 1588230740/ns: closing flushed file at 1731671192255Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5c21d190: reopening flushed file at 1731671192274 (+19 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 59ms, sequenceid=6, compaction requested=false at 1731671192294 (+20 ms)Writing region close event to WAL at 1731671192295 (+1 ms)Running coprocessor post-close hooks at 1731671192302 (+7 ms)Closed at 1731671192302 2024-11-15T11:46:32,302 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-15T11:46:32,434 INFO [RS:0;7adf9b3d9d04:37825 {}] regionserver.HRegionServer(976): stopping server 7adf9b3d9d04,37825,1731671190555; all regions closed. 
2024-11-15T11:46:32,434 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:32,434 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:32,435 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:32,435 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:32,435 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:32,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46007 is added to blk_1073741834_1010 (size=1152) 2024-11-15T11:46:32,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42323 is added to blk_1073741834_1010 (size=1152) 2024-11-15T11:46:32,444 DEBUG [RS:0;7adf9b3d9d04:37825 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/oldWALs 2024-11-15T11:46:32,444 INFO [RS:0;7adf9b3d9d04:37825 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7adf9b3d9d04%2C37825%2C1731671190555.meta:.meta(num 1731671191978) 2024-11-15T11:46:32,445 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:32,445 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:32,445 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:32,445 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:32,445 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:32,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46007 is added to blk_1073741833_1009 (size=93) 2024-11-15T11:46:32,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42323 is added to blk_1073741833_1009 (size=93) 2024-11-15T11:46:32,450 INFO [regionserver/7adf9b3d9d04:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-15T11:46:32,450 INFO [regionserver/7adf9b3d9d04:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-15T11:46:32,452 DEBUG [RS:0;7adf9b3d9d04:37825 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/oldWALs 2024-11-15T11:46:32,452 INFO [RS:0;7adf9b3d9d04:37825 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7adf9b3d9d04%2C37825%2C1731671190555:(num 1731671191588) 2024-11-15T11:46:32,452 DEBUG [RS:0;7adf9b3d9d04:37825 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:46:32,452 INFO [RS:0;7adf9b3d9d04:37825 {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T11:46:32,452 INFO [RS:0;7adf9b3d9d04:37825 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T11:46:32,452 INFO [RS:0;7adf9b3d9d04:37825 {}] hbase.ChoreService(370): Chore service for: regionserver/7adf9b3d9d04:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-15T11:46:32,453 INFO [RS:0;7adf9b3d9d04:37825 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T11:46:32,453 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-15T11:46:32,453 INFO [RS:0;7adf9b3d9d04:37825 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37825 2024-11-15T11:46:32,482 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37825-0x1013f9afe210001, quorum=127.0.0.1:57914, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7adf9b3d9d04,37825,1731671190555 2024-11-15T11:46:32,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34655-0x1013f9afe210000, quorum=127.0.0.1:57914, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T11:46:32,482 INFO [RS:0;7adf9b3d9d04:37825 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T11:46:32,482 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7adf9b3d9d04,37825,1731671190555] 2024-11-15T11:46:32,490 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/7adf9b3d9d04,37825,1731671190555 already deleted, retry=false 2024-11-15T11:46:32,490 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 7adf9b3d9d04,37825,1731671190555 expired; onlineServers=0 2024-11-15T11:46:32,490 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '7adf9b3d9d04,34655,1731671190390' ***** 2024-11-15T11:46:32,490 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-15T11:46:32,490 INFO [M:0;7adf9b3d9d04:34655 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T11:46:32,491 INFO [M:0;7adf9b3d9d04:34655 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T11:46:32,491 DEBUG [M:0;7adf9b3d9d04:34655 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-15T11:46:32,491 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-15T11:46:32,491 DEBUG [M:0;7adf9b3d9d04:34655 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-15T11:46:32,491 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster-HFileCleaner.large.0-1731671191341 {}] cleaner.HFileCleaner(306): Exit Thread[master/7adf9b3d9d04:0:becomeActiveMaster-HFileCleaner.large.0-1731671191341,5,FailOnTimeoutGroup] 2024-11-15T11:46:32,491 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster-HFileCleaner.small.0-1731671191341 {}] cleaner.HFileCleaner(306): Exit Thread[master/7adf9b3d9d04:0:becomeActiveMaster-HFileCleaner.small.0-1731671191341,5,FailOnTimeoutGroup] 2024-11-15T11:46:32,491 INFO [M:0;7adf9b3d9d04:34655 {}] hbase.ChoreService(370): Chore service for: master/7adf9b3d9d04:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-15T11:46:32,491 INFO [M:0;7adf9b3d9d04:34655 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T11:46:32,491 DEBUG [M:0;7adf9b3d9d04:34655 {}] master.HMaster(1795): Stopping service threads 2024-11-15T11:46:32,491 INFO [M:0;7adf9b3d9d04:34655 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-15T11:46:32,491 INFO [M:0;7adf9b3d9d04:34655 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T11:46:32,492 INFO [M:0;7adf9b3d9d04:34655 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-15T11:46:32,492 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-15T11:46:32,498 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34655-0x1013f9afe210000, quorum=127.0.0.1:57914, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-15T11:46:32,498 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34655-0x1013f9afe210000, quorum=127.0.0.1:57914, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:46:32,506 DEBUG [M:0;7adf9b3d9d04:34655 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false 2024-11-15T11:46:32,507 DEBUG [M:0;7adf9b3d9d04:34655 {}] master.ActiveMasterManager(353): master:34655-0x1013f9afe210000, quorum=127.0.0.1:57914, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master 2024-11-15T11:46:32,507 INFO [M:0;7adf9b3d9d04:34655 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/.lastflushedseqids 2024-11-15T11:46:32,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42323 is added to blk_1073741836_1012 (size=99) 2024-11-15T11:46:32,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46007 is added to blk_1073741836_1012 (size=99) 2024-11-15T11:46:32,516 INFO [M:0;7adf9b3d9d04:34655 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-15T11:46:32,517 INFO [M:0;7adf9b3d9d04:34655 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-15T11:46:32,517 DEBUG [M:0;7adf9b3d9d04:34655 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 
2024-11-15T11:46:32,517 INFO [M:0;7adf9b3d9d04:34655 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:46:32,517 DEBUG [M:0;7adf9b3d9d04:34655 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:46:32,517 DEBUG [M:0;7adf9b3d9d04:34655 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T11:46:32,517 DEBUG [M:0;7adf9b3d9d04:34655 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:46:32,517 INFO [M:0;7adf9b3d9d04:34655 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-15T11:46:32,537 DEBUG [M:0;7adf9b3d9d04:34655 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c7a93d20bf9a4980951b40a43a955061 is 82, key is hbase:meta,,1/info:regioninfo/1731671192024/Put/seqid=0 2024-11-15T11:46:32,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46007 is added to blk_1073741837_1013 (size=5672) 2024-11-15T11:46:32,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42323 is added to blk_1073741837_1013 (size=5672) 2024-11-15T11:46:32,546 INFO [M:0;7adf9b3d9d04:34655 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c7a93d20bf9a4980951b40a43a955061 2024-11-15T11:46:32,556 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:46:32,564 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:46:32,571 DEBUG [M:0;7adf9b3d9d04:34655 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5443a04df3a14d8482319338d27f1fe6 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731671192096/Put/seqid=0 2024-11-15T11:46:32,590 INFO [RS:0;7adf9b3d9d04:37825 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T11:46:32,590 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37825-0x1013f9afe210001, quorum=127.0.0.1:57914, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T11:46:32,590 INFO [RS:0;7adf9b3d9d04:37825 {}] regionserver.HRegionServer(1031): Exiting; stopping=7adf9b3d9d04,37825,1731671190555; zookeeper connection closed. 
2024-11-15T11:46:32,590 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37825-0x1013f9afe210001, quorum=127.0.0.1:57914, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T11:46:32,598 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@379ec9eb {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@379ec9eb 2024-11-15T11:46:32,599 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-15T11:46:32,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46007 is added to blk_1073741838_1014 (size=5275) 2024-11-15T11:46:32,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42323 is added to blk_1073741838_1014 (size=5275) 2024-11-15T11:46:32,603 INFO [M:0;7adf9b3d9d04:34655 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5443a04df3a14d8482319338d27f1fe6 2024-11-15T11:46:32,634 DEBUG [M:0;7adf9b3d9d04:34655 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/14945c57f1ef45a5810e01be730364bd is 69, key is 7adf9b3d9d04,37825,1731671190555/rs:state/1731671191425/Put/seqid=0 2024-11-15T11:46:32,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42323 is added to blk_1073741839_1015 (size=5156) 2024-11-15T11:46:32,645 INFO [M:0;7adf9b3d9d04:34655 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/14945c57f1ef45a5810e01be730364bd 2024-11-15T11:46:32,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46007 is added to blk_1073741839_1015 (size=5156) 2024-11-15T11:46:32,683 DEBUG [M:0;7adf9b3d9d04:34655 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/742a6752259d4686978490ba1a66b284 is 52, key is load_balancer_on/state:d/1731671192204/Put/seqid=0 2024-11-15T11:46:32,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42323 is added to blk_1073741840_1016 (size=5056) 2024-11-15T11:46:32,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46007 is added to blk_1073741840_1016 (size=5056) 2024-11-15T11:46:32,696 INFO [M:0;7adf9b3d9d04:34655 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/742a6752259d4686978490ba1a66b284 2024-11-15T11:46:32,710 DEBUG [M:0;7adf9b3d9d04:34655 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c7a93d20bf9a4980951b40a43a955061 as hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c7a93d20bf9a4980951b40a43a955061 2024-11-15T11:46:32,722 INFO [M:0;7adf9b3d9d04:34655 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c7a93d20bf9a4980951b40a43a955061, entries=8, sequenceid=29, filesize=5.5 K 2024-11-15T11:46:32,724 DEBUG [M:0;7adf9b3d9d04:34655 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5443a04df3a14d8482319338d27f1fe6 as hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5443a04df3a14d8482319338d27f1fe6 2024-11-15T11:46:32,732 INFO [M:0;7adf9b3d9d04:34655 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5443a04df3a14d8482319338d27f1fe6, entries=3, sequenceid=29, filesize=5.2 K 2024-11-15T11:46:32,733 DEBUG [M:0;7adf9b3d9d04:34655 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/14945c57f1ef45a5810e01be730364bd as hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/14945c57f1ef45a5810e01be730364bd 2024-11-15T11:46:32,742 INFO [M:0;7adf9b3d9d04:34655 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/14945c57f1ef45a5810e01be730364bd, entries=1, sequenceid=29, filesize=5.0 K 2024-11-15T11:46:32,747 DEBUG [M:0;7adf9b3d9d04:34655 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/742a6752259d4686978490ba1a66b284 as hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/742a6752259d4686978490ba1a66b284 2024-11-15T11:46:32,757 INFO [M:0;7adf9b3d9d04:34655 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39177/user/jenkins/test-data/ad7529a5-7cbd-b73b-7c81-93f1a0a89247/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/742a6752259d4686978490ba1a66b284, entries=1, sequenceid=29, filesize=4.9 K 2024-11-15T11:46:32,759 INFO [M:0;7adf9b3d9d04:34655 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 242ms, sequenceid=29, compaction requested=false 2024-11-15T11:46:32,763 INFO [M:0;7adf9b3d9d04:34655 {}] regionserver.HRegion(1973): Closed 
master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:46:32,764 DEBUG [M:0;7adf9b3d9d04:34655 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731671192517Disabling compacts and flushes for region at 1731671192517Disabling writes for close at 1731671192517Obtaining lock to block concurrent updates at 1731671192517Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731671192517Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731671192518 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731671192519 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731671192519Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731671192536 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731671192536Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731671192553 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731671192570 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731671192570Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731671192611 (+41 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731671192633 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731671192633Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731671192657 (+24 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731671192682 (+25 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731671192682Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@67ed5689: reopening flushed file at 1731671192708 (+26 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@48493406: reopening flushed file at 1731671192722 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@57d20b88: reopening flushed file at 1731671192732 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3eec13a8: reopening flushed file at 1731671192742 (+10 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 242ms, sequenceid=29, compaction requested=false at 1731671192759 (+17 ms)Writing region close event to WAL at 1731671192763 (+4 ms)Closed at 1731671192763 2024-11-15T11:46:32,764 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:32,764 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:32,764 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:32,764 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:32,765 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:32,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42323 is added to blk_1073741830_1006 (size=10311) 2024-11-15T11:46:32,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46007 is added to blk_1073741830_1006 (size=10311) 2024-11-15T11:46:32,768 INFO [M:0;7adf9b3d9d04:34655 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-15T11:46:32,768 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-15T11:46:32,768 INFO [M:0;7adf9b3d9d04:34655 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34655 2024-11-15T11:46:32,768 INFO [M:0;7adf9b3d9d04:34655 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T11:46:32,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34655-0x1013f9afe210000, quorum=127.0.0.1:57914, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T11:46:32,897 INFO [M:0;7adf9b3d9d04:34655 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T11:46:32,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34655-0x1013f9afe210000, quorum=127.0.0.1:57914, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T11:46:32,901 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@da5059a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T11:46:32,902 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2220be00{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T11:46:32,902 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T11:46:32,902 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6a742c1f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T11:46:32,902 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6082dc4f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1d1727d-ac4a-5cdf-f78d-2df22b4e1c67/hadoop.log.dir/,STOPPED} 2024-11-15T11:46:32,904 WARN [BP-1924943516-172.17.0.2-1731671188622 heartbeating to localhost/127.0.0.1:39177 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T11:46:32,904 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-15T11:46:32,904 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T11:46:32,904 WARN [BP-1924943516-172.17.0.2-1731671188622 heartbeating to localhost/127.0.0.1:39177 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1924943516-172.17.0.2-1731671188622 (Datanode Uuid 4825ed79-73f8-4a13-b23d-596d79a815b8) service to localhost/127.0.0.1:39177 2024-11-15T11:46:32,905 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1d1727d-ac4a-5cdf-f78d-2df22b4e1c67/cluster_1ee90bcf-a8aa-0eb6-6456-c1022489b8ff/data/data3/current/BP-1924943516-172.17.0.2-1731671188622 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T11:46:32,905 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1d1727d-ac4a-5cdf-f78d-2df22b4e1c67/cluster_1ee90bcf-a8aa-0eb6-6456-c1022489b8ff/data/data4/current/BP-1924943516-172.17.0.2-1731671188622 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T11:46:32,905 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T11:46:32,916 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4595827f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T11:46:32,916 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3b01355c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T11:46:32,916 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T11:46:32,916 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7b58749b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T11:46:32,917 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@61783b0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1d1727d-ac4a-5cdf-f78d-2df22b4e1c67/hadoop.log.dir/,STOPPED} 2024-11-15T11:46:32,919 WARN [BP-1924943516-172.17.0.2-1731671188622 heartbeating to localhost/127.0.0.1:39177 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T11:46:32,919 WARN [BP-1924943516-172.17.0.2-1731671188622 heartbeating to localhost/127.0.0.1:39177 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1924943516-172.17.0.2-1731671188622 (Datanode Uuid 2012846e-5a0d-4192-8b4a-dba456d7990b) service to localhost/127.0.0.1:39177 2024-11-15T11:46:32,920 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1d1727d-ac4a-5cdf-f78d-2df22b4e1c67/cluster_1ee90bcf-a8aa-0eb6-6456-c1022489b8ff/data/data1/current/BP-1924943516-172.17.0.2-1731671188622 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T11:46:32,920 ERROR 
[Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-15T11:46:32,920 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1d1727d-ac4a-5cdf-f78d-2df22b4e1c67/cluster_1ee90bcf-a8aa-0eb6-6456-c1022489b8ff/data/data2/current/BP-1924943516-172.17.0.2-1731671188622 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T11:46:32,920 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T11:46:32,921 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T11:46:32,930 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@55cb1221{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T11:46:32,931 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@542ee468{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T11:46:32,931 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T11:46:32,931 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@737d6115{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T11:46:32,931 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@54b8bf96{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1d1727d-ac4a-5cdf-f78d-2df22b4e1c67/hadoop.log.dir/,STOPPED} 2024-11-15T11:46:32,940 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-15T11:46:32,966 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-15T11:46:32,966 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-15T11:46:32,966 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1d1727d-ac4a-5cdf-f78d-2df22b4e1c67/hadoop.log.dir so I do NOT create it in target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22 2024-11-15T11:46:32,967 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1d1727d-ac4a-5cdf-f78d-2df22b4e1c67/hadoop.tmp.dir so I do NOT create it in target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22 2024-11-15T11:46:32,967 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116, deleteOnExit=true 2024-11-15T11:46:32,967 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-15T11:46:32,967 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/test.cache.data in system properties and HBase conf 2024-11-15T11:46:32,967 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/hadoop.tmp.dir in system properties and HBase conf 2024-11-15T11:46:32,967 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/hadoop.log.dir in system properties and HBase conf 2024-11-15T11:46:32,968 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-15T11:46:32,968 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-15T11:46:32,968 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-15T11:46:32,968 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-15T11:46:32,969 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-15T11:46:32,969 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-15T11:46:32,969 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-15T11:46:32,969 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T11:46:32,969 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-15T11:46:32,969 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-15T11:46:32,969 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T11:46:32,970 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T11:46:32,970 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-15T11:46:32,970 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/nfs.dump.dir in system properties and HBase conf 2024-11-15T11:46:32,970 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/java.io.tmpdir in system properties and HBase conf 2024-11-15T11:46:32,970 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T11:46:32,970 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-15T11:46:32,970 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-15T11:46:32,991 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-15T11:46:33,098 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-15T11:46:33,101 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:46:33,121 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:46:33,124 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:46:33,126 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:46:33,241 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T11:46:33,250 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T11:46:33,252 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T11:46:33,252 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T11:46:33,252 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T11:46:33,253 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T11:46:33,254 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@475f8022{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/hadoop.log.dir/,AVAILABLE} 2024-11-15T11:46:33,255 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1aa07d80{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T11:46:33,398 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@94a50db{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/java.io.tmpdir/jetty-localhost-35283-hadoop-hdfs-3_4_1-tests_jar-_-any-7345099969871137058/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T11:46:33,399 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@38184680{HTTP/1.1, (http/1.1)}{localhost:35283} 2024-11-15T11:46:33,399 INFO [Time-limited test {}] server.Server(415): Started @109153ms 2024-11-15T11:46:33,419 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-15T11:46:33,451 INFO [regionserver/7adf9b3d9d04:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T11:46:33,631 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T11:46:33,635 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T11:46:33,637 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T11:46:33,637 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T11:46:33,637 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T11:46:33,638 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3f87a993{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/hadoop.log.dir/,AVAILABLE} 2024-11-15T11:46:33,638 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@180ba686{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T11:46:33,747 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3a66255b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/java.io.tmpdir/jetty-localhost-46735-hadoop-hdfs-3_4_1-tests_jar-_-any-13904185377035598717/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T11:46:33,747 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1827207e{HTTP/1.1, (http/1.1)}{localhost:46735} 2024-11-15T11:46:33,747 INFO [Time-limited test {}] server.Server(415): Started @109501ms 2024-11-15T11:46:33,749 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T11:46:33,803 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T11:46:33,807 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T11:46:33,812 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T11:46:33,812 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T11:46:33,812 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T11:46:33,813 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@10b53169{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/hadoop.log.dir/,AVAILABLE} 2024-11-15T11:46:33,814 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@57ebe64c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T11:46:33,913 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7ae35b98{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/java.io.tmpdir/jetty-localhost-36933-hadoop-hdfs-3_4_1-tests_jar-_-any-9788530296814048516/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T11:46:33,913 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@47d1c4f2{HTTP/1.1, (http/1.1)}{localhost:36933} 2024-11-15T11:46:33,913 INFO [Time-limited test {}] server.Server(415): Started @109667ms 2024-11-15T11:46:33,915 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T11:46:34,647 WARN [Thread-674 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data1/current/BP-1235867767-172.17.0.2-1731671193006/current, will proceed with Du for space computation calculation, 2024-11-15T11:46:34,647 WARN [Thread-675 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data2/current/BP-1235867767-172.17.0.2-1731671193006/current, will proceed with Du for space computation calculation, 2024-11-15T11:46:34,675 WARN [Thread-638 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-15T11:46:34,677 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6bc134659f84ec71 with lease ID 0x5b612b60ecdb631b: Processing first storage report for DS-73a9b79b-0597-4c5e-9e1d-964c7338721e from datanode DatanodeRegistration(127.0.0.1:43663, datanodeUuid=416b8e7b-471d-4895-ba54-25752c4128d5, infoPort=33755, infoSecurePort=0, ipcPort=44705, storageInfo=lv=-57;cid=testClusterID;nsid=696059885;c=1731671193006) 2024-11-15T11:46:34,678 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6bc134659f84ec71 with lease ID 0x5b612b60ecdb631b: from storage DS-73a9b79b-0597-4c5e-9e1d-964c7338721e node DatanodeRegistration(127.0.0.1:43663, datanodeUuid=416b8e7b-471d-4895-ba54-25752c4128d5, infoPort=33755, infoSecurePort=0, ipcPort=44705, storageInfo=lv=-57;cid=testClusterID;nsid=696059885;c=1731671193006), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T11:46:34,678 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6bc134659f84ec71 with lease ID 0x5b612b60ecdb631b: Processing first storage report for DS-5a1b424e-90d0-4fbb-9c7f-aeb4c15cb7d1 from datanode DatanodeRegistration(127.0.0.1:43663, datanodeUuid=416b8e7b-471d-4895-ba54-25752c4128d5, infoPort=33755, infoSecurePort=0, ipcPort=44705, storageInfo=lv=-57;cid=testClusterID;nsid=696059885;c=1731671193006) 2024-11-15T11:46:34,678 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6bc134659f84ec71 with lease ID 0x5b612b60ecdb631b: from storage DS-5a1b424e-90d0-4fbb-9c7f-aeb4c15cb7d1 node DatanodeRegistration(127.0.0.1:43663, datanodeUuid=416b8e7b-471d-4895-ba54-25752c4128d5, infoPort=33755, infoSecurePort=0, ipcPort=44705, storageInfo=lv=-57;cid=testClusterID;nsid=696059885;c=1731671193006), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T11:46:34,782 WARN [Thread-685 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data3/current/BP-1235867767-172.17.0.2-1731671193006/current, will proceed with Du for space computation calculation, 2024-11-15T11:46:34,782 WARN [Thread-686 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data4/current/BP-1235867767-172.17.0.2-1731671193006/current, will proceed with Du for space computation calculation, 2024-11-15T11:46:34,815 WARN [Thread-661 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-15T11:46:34,821 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb09c07abd9aeb62f with lease ID 0x5b612b60ecdb631c: Processing first storage report for DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b from datanode DatanodeRegistration(127.0.0.1:45005, datanodeUuid=053e9b0a-0ad1-4ae0-be32-c63262dd71be, infoPort=46173, infoSecurePort=0, ipcPort=41559, storageInfo=lv=-57;cid=testClusterID;nsid=696059885;c=1731671193006) 2024-11-15T11:46:34,821 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb09c07abd9aeb62f with lease ID 0x5b612b60ecdb631c: from storage DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b node DatanodeRegistration(127.0.0.1:45005, datanodeUuid=053e9b0a-0ad1-4ae0-be32-c63262dd71be, infoPort=46173, infoSecurePort=0, ipcPort=41559, storageInfo=lv=-57;cid=testClusterID;nsid=696059885;c=1731671193006), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T11:46:34,821 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb09c07abd9aeb62f with lease ID 0x5b612b60ecdb631c: Processing first storage report for DS-f2f46fdc-cde6-48ff-ab65-bed20d14a134 from datanode DatanodeRegistration(127.0.0.1:45005, datanodeUuid=053e9b0a-0ad1-4ae0-be32-c63262dd71be, infoPort=46173, infoSecurePort=0, ipcPort=41559, storageInfo=lv=-57;cid=testClusterID;nsid=696059885;c=1731671193006) 2024-11-15T11:46:34,821 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb09c07abd9aeb62f with lease ID 0x5b612b60ecdb631c: from storage DS-f2f46fdc-cde6-48ff-ab65-bed20d14a134 node DatanodeRegistration(127.0.0.1:45005, datanodeUuid=053e9b0a-0ad1-4ae0-be32-c63262dd71be, infoPort=46173, infoSecurePort=0, ipcPort=41559, storageInfo=lv=-57;cid=testClusterID;nsid=696059885;c=1731671193006), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T11:46:34,848 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22 2024-11-15T11:46:34,853 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/zookeeper_0, clientPort=61276, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-15T11:46:34,855 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=61276 2024-11-15T11:46:34,855 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:46:34,858 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:46:34,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45005 is added to blk_1073741825_1001 (size=7) 2024-11-15T11:46:34,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43663 is added to blk_1073741825_1001 (size=7) 2024-11-15T11:46:34,884 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2 with version=8 2024-11-15T11:46:34,884 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/hbase-staging 2024-11-15T11:46:34,887 INFO [Time-limited test {}] client.ConnectionUtils(128): master/7adf9b3d9d04:0 server-side Connection retries=45 2024-11-15T11:46:34,887 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T11:46:34,887 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T11:46:34,887 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T11:46:34,887 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T11:46:34,887 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T11:46:34,887 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-15T11:46:34,888 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T11:46:34,891 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34851 2024-11-15T11:46:34,894 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34851 connecting to ZooKeeper ensemble=127.0.0.1:61276 2024-11-15T11:46:34,938 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:348510x0, quorum=127.0.0.1:61276, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T11:46:34,938 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34851-0x1013f9b0fa70000 connected 2024-11-15T11:46:35,073 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:46:35,075 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:46:35,077 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34851-0x1013f9b0fa70000, quorum=127.0.0.1:61276, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T11:46:35,078 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2, hbase.cluster.distributed=false 2024-11-15T11:46:35,080 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34851-0x1013f9b0fa70000, quorum=127.0.0.1:61276, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T11:46:35,080 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34851 2024-11-15T11:46:35,081 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34851 2024-11-15T11:46:35,081 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34851 2024-11-15T11:46:35,081 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34851 2024-11-15T11:46:35,081 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34851 2024-11-15T11:46:35,096 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7adf9b3d9d04:0 server-side Connection retries=45 2024-11-15T11:46:35,096 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T11:46:35,096 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T11:46:35,096 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T11:46:35,096 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T11:46:35,096 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T11:46:35,096 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-15T11:46:35,096 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T11:46:35,097 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46507 2024-11-15T11:46:35,098 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46507 connecting to ZooKeeper ensemble=127.0.0.1:61276 2024-11-15T11:46:35,099 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:46:35,100 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:46:35,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:465070x0, quorum=127.0.0.1:61276, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T11:46:35,139 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46507-0x1013f9b0fa70001 connected 2024-11-15T11:46:35,139 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46507-0x1013f9b0fa70001, quorum=127.0.0.1:61276, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T11:46:35,139 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-15T11:46:35,140 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-15T11:46:35,141 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46507-0x1013f9b0fa70001, quorum=127.0.0.1:61276, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-15T11:46:35,143 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46507-0x1013f9b0fa70001, quorum=127.0.0.1:61276, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T11:46:35,144 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46507 2024-11-15T11:46:35,145 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46507 2024-11-15T11:46:35,145 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46507 2024-11-15T11:46:35,146 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46507 2024-11-15T11:46:35,146 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46507 2024-11-15T11:46:35,161 DEBUG [M:0;7adf9b3d9d04:34851 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7adf9b3d9d04:34851 2024-11-15T11:46:35,162 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/7adf9b3d9d04,34851,1731671194886 2024-11-15T11:46:35,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46507-0x1013f9b0fa70001, quorum=127.0.0.1:61276, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T11:46:35,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34851-0x1013f9b0fa70000, quorum=127.0.0.1:61276, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T11:46:35,188 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34851-0x1013f9b0fa70000, quorum=127.0.0.1:61276, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/7adf9b3d9d04,34851,1731671194886 2024-11-15T11:46:35,197 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34851-0x1013f9b0fa70000, quorum=127.0.0.1:61276, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:46:35,197 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46507-0x1013f9b0fa70001, quorum=127.0.0.1:61276, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-15T11:46:35,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46507-0x1013f9b0fa70001, quorum=127.0.0.1:61276, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:46:35,198 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34851-0x1013f9b0fa70000, quorum=127.0.0.1:61276, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-15T11:46:35,198 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7adf9b3d9d04,34851,1731671194886 from backup master directory 2024-11-15T11:46:35,206 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34851-0x1013f9b0fa70000, quorum=127.0.0.1:61276, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/7adf9b3d9d04,34851,1731671194886 2024-11-15T11:46:35,206 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46507-0x1013f9b0fa70001, quorum=127.0.0.1:61276, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T11:46:35,206 WARN [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
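The records above trace a full minicluster restart: the previous HDFS, ZooKeeper and HBase processes are torn down ("Minicluster is down"), a fresh run re-creates the test directories and system properties, starts DFS and a MiniZooKeeperCluster, and then brings up a master (port 34851) and a region server (port 46507) that register themselves in ZooKeeper. A minimal sketch of the test-side calls that drive such a sequence follows; it assumes the HBase 3.x test API named in the log (HBaseTestingUtil, StartMiniClusterOption), and the class name and test body are illustrative only, not taken from the log.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterRestartSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    // Mirrors the option dump in the log: 1 master, 1 region server,
    // 2 data nodes, 1 ZooKeeper server, no pre-created root/WAL dirs.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();
    util.startMiniCluster(option);   // starts DFS, ZK, the master and the region server
    try {
      // ... exercise the cluster through util.getConnection() ...
    } finally {
      util.shutdownMiniCluster();    // produces the "Minicluster is down" record
    }
  }
}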
2024-11-15T11:46:35,206 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34851-0x1013f9b0fa70000, quorum=127.0.0.1:61276, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T11:46:35,206 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7adf9b3d9d04,34851,1731671194886 2024-11-15T11:46:35,210 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/hbase.id] with ID: 6586e257-dc48-4515-9432-63290e252123 2024-11-15T11:46:35,210 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/.tmp/hbase.id 2024-11-15T11:46:35,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45005 is added to blk_1073741826_1002 (size=42) 2024-11-15T11:46:35,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43663 is added to blk_1073741826_1002 (size=42) 2024-11-15T11:46:35,217 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/.tmp/hbase.id]:[hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/hbase.id] 2024-11-15T11:46:35,229 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:46:35,229 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-15T11:46:35,230 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
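The cluster ID records above show a write-then-rename publish: the ID is first written to .tmp/hbase.id and only then moved to its final hbase.id location, so readers never see a half-written file. Below is a generic sketch of that pattern with the plain Hadoop FileSystem API; the paths, payload and helper name are placeholders, not values from the log, and this is not the FSUtils implementation itself.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WriteThenRenameSketch {
  // Writes content to <dir>/.tmp/<name>, then renames it to <dir>/<name>.
  static void publish(FileSystem fs, Path dir, String name, String content) throws IOException {
    Path tmp = new Path(new Path(dir, ".tmp"), name);
    Path dst = new Path(dir, name);
    try (FSDataOutputStream out = fs.create(tmp, true)) {  // overwrite any stale temp file
      out.write(content.getBytes(StandardCharsets.UTF_8));
    }
    if (!fs.rename(tmp, dst)) {                            // the rename is the publish step
      throw new IOException("rename failed: " + tmp + " -> " + dst);
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();              // would point at the mini DFS in a test
    try (FileSystem fs = FileSystem.get(conf)) {
      publish(fs, new Path("/tmp/sketch"), "hbase.id", "example-cluster-id");
    }
  }
}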
2024-11-15T11:46:35,239 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46507-0x1013f9b0fa70001, quorum=127.0.0.1:61276, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:46:35,239 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34851-0x1013f9b0fa70000, quorum=127.0.0.1:61276, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:46:35,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45005 is added to blk_1073741827_1003 (size=196) 2024-11-15T11:46:35,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43663 is added to blk_1073741827_1003 (size=196) 2024-11-15T11:46:35,249 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T11:46:35,250 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-15T11:46:35,250 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T11:46:35,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45005 is added to blk_1073741828_1004 (size=1189) 2024-11-15T11:46:35,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43663 is added to blk_1073741828_1004 (size=1189) 2024-11-15T11:46:35,259 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/MasterData/data/master/store 2024-11-15T11:46:35,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45005 is added to blk_1073741829_1005 (size=34) 2024-11-15T11:46:35,269 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:46:35,269 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T11:46:35,269 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:46:35,269 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:46:35,269 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T11:46:35,270 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:46:35,270 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
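The descriptor dumps above spell out the schema of the master's local 'master:store' region: an 'info' family kept in memory with three versions, ROW_INDEX_V1 block encoding, ROWCOL bloom filters and 8 KB blocks, plus 'proc', 'rs' and 'state' families with a single version, ROW bloom filters and 64 KB blocks. The sketch below shows how an equivalent descriptor could be assembled with the public HBase client builders; it is illustrative only and is not how the master actually constructs this internal table.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static TableDescriptor build() {
    // 'info': 3 versions, in-memory, ROW_INDEX_V1 encoding, ROWCOL bloom, 8 KB blocks.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setInMemory(true)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setBlocksize(8 * 1024)
        .build();
    TableDescriptorBuilder builder = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(info);
    // 'proc', 'rs' and 'state': single version, ROW bloom, default 64 KB blocks.
    for (String family : new String[] { "proc", "rs", "state" }) {
      builder.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
          .setMaxVersions(1)
          .setBloomFilterType(BloomType.ROW)
          .build());
    }
    return builder.build();
  }
}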
2024-11-15T11:46:35,270 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731671195269Disabling compacts and flushes for region at 1731671195269Disabling writes for close at 1731671195269Writing region close event to WAL at 1731671195270 (+1 ms)Closed at 1731671195270 2024-11-15T11:46:35,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43663 is added to blk_1073741829_1005 (size=34) 2024-11-15T11:46:35,271 WARN [master/7adf9b3d9d04:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/MasterData/data/master/store/.initializing 2024-11-15T11:46:35,271 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/MasterData/WALs/7adf9b3d9d04,34851,1731671194886 2024-11-15T11:46:35,275 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7adf9b3d9d04%2C34851%2C1731671194886, suffix=, logDir=hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/MasterData/WALs/7adf9b3d9d04,34851,1731671194886, archiveDir=hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/MasterData/oldWALs, maxLogs=10 2024-11-15T11:46:35,276 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7adf9b3d9d04%2C34851%2C1731671194886.1731671195275 2024-11-15T11:46:35,282 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/MasterData/WALs/7adf9b3d9d04,34851,1731671194886/7adf9b3d9d04%2C34851%2C1731671194886.1731671195275 2024-11-15T11:46:35,291 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46173:46173),(127.0.0.1/127.0.0.1:33755:33755)] 2024-11-15T11:46:35,291 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-15T11:46:35,292 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:46:35,292 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:46:35,292 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:46:35,294 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:46:35,296 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-15T11:46:35,296 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:46:35,296 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:46:35,296 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:46:35,298 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-15T11:46:35,298 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:46:35,298 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:46:35,298 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:46:35,300 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-15T11:46:35,300 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:46:35,300 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:46:35,301 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:46:35,302 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-15T11:46:35,302 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:46:35,303 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:46:35,303 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:46:35,304 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:46:35,304 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:46:35,306 DEBUG 
[master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:46:35,306 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:46:35,306 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-15T11:46:35,308 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:46:35,310 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T11:46:35,311 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=782073, jitterRate=-0.0055429041385650635}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-15T11:46:35,311 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731671195292Initializing all the Stores at 1731671195293 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731671195293Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731671195294 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731671195294Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731671195294Cleaning up temporary data from old regions at 1731671195306 (+12 ms)Region opened successfully at 1731671195311 (+5 ms) 2024-11-15T11:46:35,312 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-15T11:46:35,315 DEBUG 
[master/7adf9b3d9d04:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7788b82f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7adf9b3d9d04/172.17.0.2:0 2024-11-15T11:46:35,316 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-15T11:46:35,316 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-15T11:46:35,316 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-15T11:46:35,317 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-15T11:46:35,317 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-15T11:46:35,318 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-15T11:46:35,318 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-15T11:46:35,323 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-15T11:46:35,324 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34851-0x1013f9b0fa70000, quorum=127.0.0.1:61276, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-15T11:46:35,347 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-15T11:46:35,348 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-15T11:46:35,349 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34851-0x1013f9b0fa70000, quorum=127.0.0.1:61276, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-15T11:46:35,356 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-15T11:46:35,356 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-15T11:46:35,357 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34851-0x1013f9b0fa70000, quorum=127.0.0.1:61276, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-15T11:46:35,364 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-15T11:46:35,365 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): 
master:34851-0x1013f9b0fa70000, quorum=127.0.0.1:61276, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-15T11:46:35,372 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-15T11:46:35,375 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34851-0x1013f9b0fa70000, quorum=127.0.0.1:61276, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-15T11:46:35,381 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-15T11:46:35,389 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46507-0x1013f9b0fa70001, quorum=127.0.0.1:61276, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T11:46:35,389 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34851-0x1013f9b0fa70000, quorum=127.0.0.1:61276, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T11:46:35,389 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46507-0x1013f9b0fa70001, quorum=127.0.0.1:61276, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:46:35,389 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34851-0x1013f9b0fa70000, quorum=127.0.0.1:61276, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:46:35,390 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=7adf9b3d9d04,34851,1731671194886, sessionid=0x1013f9b0fa70000, setting cluster-up flag (Was=false) 2024-11-15T11:46:35,406 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34851-0x1013f9b0fa70000, quorum=127.0.0.1:61276, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:46:35,406 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46507-0x1013f9b0fa70001, quorum=127.0.0.1:61276, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:46:35,431 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-15T11:46:35,433 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7adf9b3d9d04,34851,1731671194886 2024-11-15T11:46:35,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46507-0x1013f9b0fa70001, quorum=127.0.0.1:61276, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:46:35,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34851-0x1013f9b0fa70000, quorum=127.0.0.1:61276, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:46:35,472 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): 
Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-15T11:46:35,474 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7adf9b3d9d04,34851,1731671194886 2024-11-15T11:46:35,476 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-15T11:46:35,478 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-15T11:46:35,478 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-15T11:46:35,478 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-15T11:46:35,478 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7adf9b3d9d04,34851,1731671194886 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-15T11:46:35,480 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7adf9b3d9d04:0, corePoolSize=5, maxPoolSize=5 2024-11-15T11:46:35,480 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7adf9b3d9d04:0, corePoolSize=5, maxPoolSize=5 2024-11-15T11:46:35,480 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7adf9b3d9d04:0, corePoolSize=5, maxPoolSize=5 2024-11-15T11:46:35,480 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7adf9b3d9d04:0, corePoolSize=5, maxPoolSize=5 2024-11-15T11:46:35,480 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7adf9b3d9d04:0, corePoolSize=10, maxPoolSize=10 2024-11-15T11:46:35,480 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:46:35,481 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/7adf9b3d9d04:0, corePoolSize=2, maxPoolSize=2 
2024-11-15T11:46:35,481 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:46:35,482 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731671225482 2024-11-15T11:46:35,482 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-15T11:46:35,482 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-15T11:46:35,483 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-15T11:46:35,483 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-15T11:46:35,483 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-15T11:46:35,483 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T11:46:35,483 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-15T11:46:35,483 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-15T11:46:35,483 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-15T11:46:35,484 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:46:35,484 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-15T11:46:35,487 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-15T11:46:35,487 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-15T11:46:35,487 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-15T11:46:35,489 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-15T11:46:35,489 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-15T11:46:35,490 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7adf9b3d9d04:0:becomeActiveMaster-HFileCleaner.large.0-1731671195489,5,FailOnTimeoutGroup] 2024-11-15T11:46:35,493 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/7adf9b3d9d04:0:becomeActiveMaster-HFileCleaner.small.0-1731671195491,5,FailOnTimeoutGroup] 2024-11-15T11:46:35,493 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-15T11:46:35,493 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-15T11:46:35,493 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-15T11:46:35,493 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-15T11:46:35,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43663 is added to blk_1073741831_1007 (size=1321) 2024-11-15T11:46:35,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45005 is added to blk_1073741831_1007 (size=1321) 2024-11-15T11:46:35,498 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-15T11:46:35,498 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2 2024-11-15T11:46:35,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43663 is added to blk_1073741832_1008 (size=32) 2024-11-15T11:46:35,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45005 is added to blk_1073741832_1008 (size=32) 2024-11-15T11:46:35,513 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:46:35,519 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T11:46:35,521 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T11:46:35,521 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:46:35,522 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:46:35,522 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T11:46:35,523 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T11:46:35,524 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:46:35,524 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:46:35,524 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T11:46:35,526 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T11:46:35,526 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:46:35,527 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:46:35,527 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T11:46:35,529 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T11:46:35,529 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:46:35,529 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:46:35,530 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T11:46:35,530 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/hbase/meta/1588230740 2024-11-15T11:46:35,531 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/hbase/meta/1588230740 2024-11-15T11:46:35,532 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T11:46:35,532 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T11:46:35,533 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-15T11:46:35,534 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T11:46:35,537 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T11:46:35,537 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=698509, jitterRate=-0.11180029809474945}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T11:46:35,539 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731671195513Initializing all the Stores at 1731671195514 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731671195514Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731671195518 (+4 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731671195518Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731671195518Cleaning up temporary data from old regions at 1731671195532 (+14 ms)Region opened successfully at 1731671195538 (+6 ms) 2024-11-15T11:46:35,539 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T11:46:35,539 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T11:46:35,539 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T11:46:35,539 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T11:46:35,539 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T11:46:35,539 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T11:46:35,540 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731671195539Disabling compacts and flushes for region at 
1731671195539Disabling writes for close at 1731671195539Writing region close event to WAL at 1731671195539Closed at 1731671195539 2024-11-15T11:46:35,541 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T11:46:35,541 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-15T11:46:35,541 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-15T11:46:35,543 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T11:46:35,544 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-15T11:46:35,552 INFO [RS:0;7adf9b3d9d04:46507 {}] regionserver.HRegionServer(746): ClusterId : 6586e257-dc48-4515-9432-63290e252123 2024-11-15T11:46:35,552 DEBUG [RS:0;7adf9b3d9d04:46507 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-15T11:46:35,572 DEBUG [RS:0;7adf9b3d9d04:46507 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-15T11:46:35,572 DEBUG [RS:0;7adf9b3d9d04:46507 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-15T11:46:35,582 DEBUG [RS:0;7adf9b3d9d04:46507 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-15T11:46:35,582 DEBUG [RS:0;7adf9b3d9d04:46507 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@ab7fbd6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7adf9b3d9d04/172.17.0.2:0 2024-11-15T11:46:35,600 DEBUG [RS:0;7adf9b3d9d04:46507 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;7adf9b3d9d04:46507 2024-11-15T11:46:35,600 INFO [RS:0;7adf9b3d9d04:46507 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-15T11:46:35,600 INFO [RS:0;7adf9b3d9d04:46507 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-15T11:46:35,600 DEBUG [RS:0;7adf9b3d9d04:46507 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-15T11:46:35,601 INFO [RS:0;7adf9b3d9d04:46507 {}] regionserver.HRegionServer(2659): reportForDuty to master=7adf9b3d9d04,34851,1731671194886 with port=46507, startcode=1731671195095 2024-11-15T11:46:35,601 DEBUG [RS:0;7adf9b3d9d04:46507 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-15T11:46:35,604 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54659, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-15T11:46:35,604 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34851 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 7adf9b3d9d04,46507,1731671195095 2024-11-15T11:46:35,604 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34851 {}] master.ServerManager(517): Registering regionserver=7adf9b3d9d04,46507,1731671195095 2024-11-15T11:46:35,606 DEBUG [RS:0;7adf9b3d9d04:46507 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2 2024-11-15T11:46:35,607 DEBUG [RS:0;7adf9b3d9d04:46507 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40139 2024-11-15T11:46:35,607 DEBUG [RS:0;7adf9b3d9d04:46507 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-15T11:46:35,614 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34851-0x1013f9b0fa70000, quorum=127.0.0.1:61276, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T11:46:35,615 DEBUG [RS:0;7adf9b3d9d04:46507 {}] zookeeper.ZKUtil(111): regionserver:46507-0x1013f9b0fa70001, quorum=127.0.0.1:61276, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7adf9b3d9d04,46507,1731671195095 2024-11-15T11:46:35,615 WARN [RS:0;7adf9b3d9d04:46507 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-15T11:46:35,615 INFO [RS:0;7adf9b3d9d04:46507 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T11:46:35,615 DEBUG [RS:0;7adf9b3d9d04:46507 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095 2024-11-15T11:46:35,615 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7adf9b3d9d04,46507,1731671195095] 2024-11-15T11:46:35,620 INFO [RS:0;7adf9b3d9d04:46507 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-15T11:46:35,627 INFO [RS:0;7adf9b3d9d04:46507 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-15T11:46:35,631 INFO [RS:0;7adf9b3d9d04:46507 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-15T11:46:35,631 INFO [RS:0;7adf9b3d9d04:46507 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-15T11:46:35,631 INFO [RS:0;7adf9b3d9d04:46507 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-15T11:46:35,632 INFO [RS:0;7adf9b3d9d04:46507 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-15T11:46:35,632 INFO [RS:0;7adf9b3d9d04:46507 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-15T11:46:35,633 DEBUG [RS:0;7adf9b3d9d04:46507 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:46:35,633 DEBUG [RS:0;7adf9b3d9d04:46507 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:46:35,633 DEBUG [RS:0;7adf9b3d9d04:46507 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:46:35,633 DEBUG [RS:0;7adf9b3d9d04:46507 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:46:35,633 DEBUG [RS:0;7adf9b3d9d04:46507 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:46:35,633 DEBUG [RS:0;7adf9b3d9d04:46507 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7adf9b3d9d04:0, corePoolSize=2, maxPoolSize=2 2024-11-15T11:46:35,633 DEBUG [RS:0;7adf9b3d9d04:46507 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:46:35,633 DEBUG [RS:0;7adf9b3d9d04:46507 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:46:35,633 DEBUG [RS:0;7adf9b3d9d04:46507 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:46:35,633 DEBUG [RS:0;7adf9b3d9d04:46507 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:46:35,634 DEBUG [RS:0;7adf9b3d9d04:46507 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:46:35,634 DEBUG [RS:0;7adf9b3d9d04:46507 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:46:35,634 DEBUG [RS:0;7adf9b3d9d04:46507 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7adf9b3d9d04:0, corePoolSize=3, maxPoolSize=3 2024-11-15T11:46:35,634 DEBUG [RS:0;7adf9b3d9d04:46507 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7adf9b3d9d04:0, corePoolSize=3, maxPoolSize=3 2024-11-15T11:46:35,634 INFO [RS:0;7adf9b3d9d04:46507 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-15T11:46:35,634 INFO [RS:0;7adf9b3d9d04:46507 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T11:46:35,635 INFO [RS:0;7adf9b3d9d04:46507 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T11:46:35,635 INFO [RS:0;7adf9b3d9d04:46507 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-15T11:46:35,635 INFO [RS:0;7adf9b3d9d04:46507 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-15T11:46:35,635 INFO [RS:0;7adf9b3d9d04:46507 {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,46507,1731671195095-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T11:46:35,658 INFO [RS:0;7adf9b3d9d04:46507 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-15T11:46:35,658 INFO [RS:0;7adf9b3d9d04:46507 {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,46507,1731671195095-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T11:46:35,658 INFO [RS:0;7adf9b3d9d04:46507 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T11:46:35,658 INFO [RS:0;7adf9b3d9d04:46507 {}] regionserver.Replication(171): 7adf9b3d9d04,46507,1731671195095 started 2024-11-15T11:46:35,675 INFO [RS:0;7adf9b3d9d04:46507 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T11:46:35,675 INFO [RS:0;7adf9b3d9d04:46507 {}] regionserver.HRegionServer(1482): Serving as 7adf9b3d9d04,46507,1731671195095, RpcServer on 7adf9b3d9d04/172.17.0.2:46507, sessionid=0x1013f9b0fa70001 2024-11-15T11:46:35,675 DEBUG [RS:0;7adf9b3d9d04:46507 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-15T11:46:35,675 DEBUG [RS:0;7adf9b3d9d04:46507 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7adf9b3d9d04,46507,1731671195095 2024-11-15T11:46:35,675 DEBUG [RS:0;7adf9b3d9d04:46507 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7adf9b3d9d04,46507,1731671195095' 2024-11-15T11:46:35,675 DEBUG [RS:0;7adf9b3d9d04:46507 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-15T11:46:35,676 DEBUG [RS:0;7adf9b3d9d04:46507 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-15T11:46:35,677 DEBUG [RS:0;7adf9b3d9d04:46507 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-15T11:46:35,677 DEBUG [RS:0;7adf9b3d9d04:46507 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-15T11:46:35,677 DEBUG [RS:0;7adf9b3d9d04:46507 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7adf9b3d9d04,46507,1731671195095 2024-11-15T11:46:35,677 DEBUG [RS:0;7adf9b3d9d04:46507 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7adf9b3d9d04,46507,1731671195095' 2024-11-15T11:46:35,677 DEBUG [RS:0;7adf9b3d9d04:46507 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-15T11:46:35,677 DEBUG 
[RS:0;7adf9b3d9d04:46507 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-15T11:46:35,678 DEBUG [RS:0;7adf9b3d9d04:46507 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-15T11:46:35,678 INFO [RS:0;7adf9b3d9d04:46507 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-15T11:46:35,678 INFO [RS:0;7adf9b3d9d04:46507 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-15T11:46:35,695 WARN [7adf9b3d9d04:34851 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-15T11:46:35,781 INFO [RS:0;7adf9b3d9d04:46507 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7adf9b3d9d04%2C46507%2C1731671195095, suffix=, logDir=hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095, archiveDir=hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/oldWALs, maxLogs=32 2024-11-15T11:46:35,782 INFO [RS:0;7adf9b3d9d04:46507 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7adf9b3d9d04%2C46507%2C1731671195095.1731671195781 2024-11-15T11:46:35,789 INFO [RS:0;7adf9b3d9d04:46507 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.1731671195781 2024-11-15T11:46:35,794 DEBUG [RS:0;7adf9b3d9d04:46507 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33755:33755),(127.0.0.1/127.0.0.1:46173:46173)] 2024-11-15T11:46:35,945 DEBUG [7adf9b3d9d04:34851 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-15T11:46:35,945 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7adf9b3d9d04,46507,1731671195095 2024-11-15T11:46:35,947 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7adf9b3d9d04,46507,1731671195095, state=OPENING 2024-11-15T11:46:35,998 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-15T11:46:36,030 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34851-0x1013f9b0fa70000, quorum=127.0.0.1:61276, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:46:36,030 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46507-0x1013f9b0fa70001, quorum=127.0.0.1:61276, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:46:36,031 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T11:46:36,031 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T11:46:36,031 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T11:46:36,031 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=7adf9b3d9d04,46507,1731671195095}] 2024-11-15T11:46:36,186 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-15T11:46:36,189 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52483, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-15T11:46:36,194 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-15T11:46:36,195 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T11:46:36,198 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7adf9b3d9d04%2C46507%2C1731671195095.meta, suffix=.meta, logDir=hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095, archiveDir=hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/oldWALs, maxLogs=32 2024-11-15T11:46:36,200 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta 2024-11-15T11:46:36,211 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta 2024-11-15T11:46:36,216 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33755:33755),(127.0.0.1/127.0.0.1:46173:46173)] 2024-11-15T11:46:36,223 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-15T11:46:36,223 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-15T11:46:36,223 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-15T11:46:36,224 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-15T11:46:36,224 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-15T11:46:36,224 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:46:36,224 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-15T11:46:36,224 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-15T11:46:36,258 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T11:46:36,262 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T11:46:36,262 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:46:36,263 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:46:36,263 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T11:46:36,264 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T11:46:36,264 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:46:36,265 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:46:36,265 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T11:46:36,266 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T11:46:36,266 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:46:36,267 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:46:36,267 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T11:46:36,269 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T11:46:36,269 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:46:36,270 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-15T11:46:36,270 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T11:46:36,271 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/hbase/meta/1588230740 2024-11-15T11:46:36,273 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/hbase/meta/1588230740 2024-11-15T11:46:36,274 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T11:46:36,274 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T11:46:36,275 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-15T11:46:36,277 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T11:46:36,278 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=809482, jitterRate=0.02930966019630432}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T11:46:36,278 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-15T11:46:36,279 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731671196224Writing region info on filesystem at 1731671196224Initializing all the Stores at 1731671196226 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731671196226Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731671196226Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731671196257 (+31 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731671196257Cleaning up temporary data from old regions at 1731671196274 (+17 ms)Running coprocessor post-open hooks at 1731671196278 (+4 ms)Region opened successfully at 1731671196279 (+1 ms) 2024-11-15T11:46:36,281 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731671196185 2024-11-15T11:46:36,285 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-15T11:46:36,285 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-15T11:46:36,287 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=7adf9b3d9d04,46507,1731671195095 2024-11-15T11:46:36,289 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7adf9b3d9d04,46507,1731671195095, state=OPEN 2024-11-15T11:46:36,340 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34851-0x1013f9b0fa70000, quorum=127.0.0.1:61276, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T11:46:36,340 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46507-0x1013f9b0fa70001, quorum=127.0.0.1:61276, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T11:46:36,340 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=7adf9b3d9d04,46507,1731671195095 2024-11-15T11:46:36,340 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T11:46:36,340 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T11:46:36,344 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-15T11:46:36,344 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=7adf9b3d9d04,46507,1731671195095 in 309 msec 2024-11-15T11:46:36,349 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-15T11:46:36,349 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 803 msec 2024-11-15T11:46:36,350 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T11:46:36,350 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-15T11:46:36,352 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:46:36,353 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7adf9b3d9d04,46507,1731671195095, seqNum=-1] 2024-11-15T11:46:36,353 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:46:36,355 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41739, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T11:46:36,364 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 884 msec 2024-11-15T11:46:36,364 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731671196364, completionTime=-1 2024-11-15T11:46:36,364 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-15T11:46:36,364 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-15T11:46:36,366 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-15T11:46:36,366 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731671256366 2024-11-15T11:46:36,366 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731671316366 2024-11-15T11:46:36,366 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-15T11:46:36,367 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,34851,1731671194886-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T11:46:36,367 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,34851,1731671194886-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T11:46:36,367 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,34851,1731671194886-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T11:46:36,367 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7adf9b3d9d04:34851, period=300000, unit=MILLISECONDS is enabled. 
2024-11-15T11:46:36,367 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-15T11:46:36,367 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-15T11:46:36,369 DEBUG [master/7adf9b3d9d04:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-15T11:46:36,372 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.166sec 2024-11-15T11:46:36,372 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-15T11:46:36,372 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-15T11:46:36,372 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-15T11:46:36,372 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-15T11:46:36,372 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-15T11:46:36,372 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,34851,1731671194886-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T11:46:36,372 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,34851,1731671194886-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-15T11:46:36,375 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-15T11:46:36,375 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-15T11:46:36,375 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,34851,1731671194886-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-15T11:46:36,463 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14e9f456, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:46:36,463 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 7adf9b3d9d04,34851,-1 for getting cluster id 2024-11-15T11:46:36,464 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T11:46:36,465 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6586e257-dc48-4515-9432-63290e252123' 2024-11-15T11:46:36,465 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T11:46:36,465 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6586e257-dc48-4515-9432-63290e252123" 2024-11-15T11:46:36,466 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@44eb01bf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:46:36,466 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [7adf9b3d9d04,34851,-1] 2024-11-15T11:46:36,466 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T11:46:36,466 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:46:36,468 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46790, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T11:46:36,469 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c7e15e0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:46:36,469 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:46:36,470 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7adf9b3d9d04,46507,1731671195095, seqNum=-1] 2024-11-15T11:46:36,470 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:46:36,472 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53182, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T11:46:36,474 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=7adf9b3d9d04,34851,1731671194886 2024-11-15T11:46:36,474 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:46:36,477 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-15T11:46:36,494 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7adf9b3d9d04:0 server-side Connection retries=45 2024-11-15T11:46:36,494 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T11:46:36,494 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T11:46:36,494 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T11:46:36,494 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T11:46:36,494 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T11:46:36,494 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-15T11:46:36,495 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T11:46:36,495 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39827 2024-11-15T11:46:36,497 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39827 connecting to ZooKeeper ensemble=127.0.0.1:61276 2024-11-15T11:46:36,497 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:46:36,499 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:46:36,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:398270x0, quorum=127.0.0.1:61276, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T11:46:36,538 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39827-0x1013f9b0fa70002 connected 2024-11-15T11:46:36,538 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:39827-0x1013f9b0fa70002, quorum=127.0.0.1:61276, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-15T11:46:36,538 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-15T11:46:36,540 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-15T11:46:36,541 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 
2024-11-15T11:46:36,542 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:39827-0x1013f9b0fa70002, quorum=127.0.0.1:61276, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-15T11:46:36,544 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39827-0x1013f9b0fa70002, quorum=127.0.0.1:61276, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T11:46:36,545 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39827 2024-11-15T11:46:36,545 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39827 2024-11-15T11:46:36,546 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39827 2024-11-15T11:46:36,546 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39827 2024-11-15T11:46:36,547 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39827 2024-11-15T11:46:36,549 INFO [RS:1;7adf9b3d9d04:39827 {}] regionserver.HRegionServer(746): ClusterId : 6586e257-dc48-4515-9432-63290e252123 2024-11-15T11:46:36,549 DEBUG [RS:1;7adf9b3d9d04:39827 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-15T11:46:36,578 DEBUG [RS:1;7adf9b3d9d04:39827 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-15T11:46:36,578 DEBUG [RS:1;7adf9b3d9d04:39827 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-15T11:46:36,611 DEBUG [RS:1;7adf9b3d9d04:39827 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-15T11:46:36,612 DEBUG [RS:1;7adf9b3d9d04:39827 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@70e08ad3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7adf9b3d9d04/172.17.0.2:0 2024-11-15T11:46:36,626 DEBUG [RS:1;7adf9b3d9d04:39827 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;7adf9b3d9d04:39827 2024-11-15T11:46:36,626 INFO [RS:1;7adf9b3d9d04:39827 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-15T11:46:36,626 INFO [RS:1;7adf9b3d9d04:39827 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-15T11:46:36,626 DEBUG [RS:1;7adf9b3d9d04:39827 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-15T11:46:36,626 INFO [RS:1;7adf9b3d9d04:39827 {}] regionserver.HRegionServer(2659): reportForDuty to master=7adf9b3d9d04,34851,1731671194886 with port=39827, startcode=1731671196494 2024-11-15T11:46:36,627 DEBUG [RS:1;7adf9b3d9d04:39827 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-15T11:46:36,628 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59211, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-15T11:46:36,629 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34851 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 7adf9b3d9d04,39827,1731671196494 2024-11-15T11:46:36,629 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34851 {}] master.ServerManager(517): Registering regionserver=7adf9b3d9d04,39827,1731671196494 2024-11-15T11:46:36,630 DEBUG [RS:1;7adf9b3d9d04:39827 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2 2024-11-15T11:46:36,630 DEBUG [RS:1;7adf9b3d9d04:39827 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40139 2024-11-15T11:46:36,630 DEBUG [RS:1;7adf9b3d9d04:39827 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-15T11:46:36,675 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34851-0x1013f9b0fa70000, quorum=127.0.0.1:61276, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T11:46:36,676 DEBUG [RS:1;7adf9b3d9d04:39827 {}] zookeeper.ZKUtil(111): regionserver:39827-0x1013f9b0fa70002, quorum=127.0.0.1:61276, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7adf9b3d9d04,39827,1731671196494 2024-11-15T11:46:36,676 WARN [RS:1;7adf9b3d9d04:39827 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-15T11:46:36,676 INFO [RS:1;7adf9b3d9d04:39827 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T11:46:36,676 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7adf9b3d9d04,39827,1731671196494] 2024-11-15T11:46:36,676 DEBUG [RS:1;7adf9b3d9d04:39827 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494 2024-11-15T11:46:36,682 INFO [RS:1;7adf9b3d9d04:39827 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-15T11:46:36,684 INFO [RS:1;7adf9b3d9d04:39827 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-15T11:46:36,685 INFO [RS:1;7adf9b3d9d04:39827 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-15T11:46:36,685 INFO [RS:1;7adf9b3d9d04:39827 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-15T11:46:36,685 INFO [RS:1;7adf9b3d9d04:39827 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-15T11:46:36,686 INFO [RS:1;7adf9b3d9d04:39827 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-15T11:46:36,686 INFO [RS:1;7adf9b3d9d04:39827 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-15T11:46:36,687 DEBUG [RS:1;7adf9b3d9d04:39827 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:46:36,687 DEBUG [RS:1;7adf9b3d9d04:39827 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:46:36,687 DEBUG [RS:1;7adf9b3d9d04:39827 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:46:36,687 DEBUG [RS:1;7adf9b3d9d04:39827 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:46:36,687 DEBUG [RS:1;7adf9b3d9d04:39827 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:46:36,687 DEBUG [RS:1;7adf9b3d9d04:39827 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7adf9b3d9d04:0, corePoolSize=2, maxPoolSize=2 2024-11-15T11:46:36,687 DEBUG [RS:1;7adf9b3d9d04:39827 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:46:36,687 DEBUG [RS:1;7adf9b3d9d04:39827 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:46:36,687 DEBUG [RS:1;7adf9b3d9d04:39827 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:46:36,688 DEBUG [RS:1;7adf9b3d9d04:39827 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:46:36,688 DEBUG [RS:1;7adf9b3d9d04:39827 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:46:36,688 DEBUG [RS:1;7adf9b3d9d04:39827 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:46:36,688 DEBUG [RS:1;7adf9b3d9d04:39827 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7adf9b3d9d04:0, corePoolSize=3, maxPoolSize=3 2024-11-15T11:46:36,688 DEBUG [RS:1;7adf9b3d9d04:39827 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7adf9b3d9d04:0, corePoolSize=3, maxPoolSize=3 2024-11-15T11:46:36,688 INFO [RS:1;7adf9b3d9d04:39827 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-15T11:46:36,689 INFO [RS:1;7adf9b3d9d04:39827 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T11:46:36,689 INFO [RS:1;7adf9b3d9d04:39827 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T11:46:36,689 INFO [RS:1;7adf9b3d9d04:39827 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-15T11:46:36,689 INFO [RS:1;7adf9b3d9d04:39827 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-15T11:46:36,689 INFO [RS:1;7adf9b3d9d04:39827 {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,39827,1731671196494-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T11:46:36,704 INFO [RS:1;7adf9b3d9d04:39827 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-15T11:46:36,704 INFO [RS:1;7adf9b3d9d04:39827 {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,39827,1731671196494-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T11:46:36,705 INFO [RS:1;7adf9b3d9d04:39827 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T11:46:36,705 INFO [RS:1;7adf9b3d9d04:39827 {}] regionserver.Replication(171): 7adf9b3d9d04,39827,1731671196494 started 2024-11-15T11:46:36,717 INFO [RS:1;7adf9b3d9d04:39827 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T11:46:36,717 INFO [RS:1;7adf9b3d9d04:39827 {}] regionserver.HRegionServer(1482): Serving as 7adf9b3d9d04,39827,1731671196494, RpcServer on 7adf9b3d9d04/172.17.0.2:39827, sessionid=0x1013f9b0fa70002 2024-11-15T11:46:36,717 DEBUG [RS:1;7adf9b3d9d04:39827 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-15T11:46:36,718 DEBUG [RS:1;7adf9b3d9d04:39827 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7adf9b3d9d04,39827,1731671196494 2024-11-15T11:46:36,718 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;7adf9b3d9d04:39827,5,FailOnTimeoutGroup] 2024-11-15T11:46:36,718 DEBUG [RS:1;7adf9b3d9d04:39827 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7adf9b3d9d04,39827,1731671196494' 2024-11-15T11:46:36,718 DEBUG [RS:1;7adf9b3d9d04:39827 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-15T11:46:36,718 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-15T11:46:36,718 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-15T11:46:36,718 DEBUG [RS:1;7adf9b3d9d04:39827 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-15T11:46:36,719 DEBUG [RS:1;7adf9b3d9d04:39827 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-15T11:46:36,719 DEBUG [RS:1;7adf9b3d9d04:39827 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-15T11:46:36,719 DEBUG [RS:1;7adf9b3d9d04:39827 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
7adf9b3d9d04,39827,1731671196494 2024-11-15T11:46:36,719 DEBUG [RS:1;7adf9b3d9d04:39827 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7adf9b3d9d04,39827,1731671196494' 2024-11-15T11:46:36,719 DEBUG [RS:1;7adf9b3d9d04:39827 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-15T11:46:36,719 DEBUG [RS:1;7adf9b3d9d04:39827 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-15T11:46:36,719 DEBUG [RS:1;7adf9b3d9d04:39827 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-15T11:46:36,719 INFO [RS:1;7adf9b3d9d04:39827 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-15T11:46:36,719 INFO [RS:1;7adf9b3d9d04:39827 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-15T11:46:36,719 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 7adf9b3d9d04,34851,1731671194886 2024-11-15T11:46:36,719 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3ee6ce85 2024-11-15T11:46:36,720 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-15T11:46:36,722 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46800, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-15T11:46:36,723 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34851 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-15T11:46:36,723 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34851 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-15T11:46:36,723 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34851 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T11:46:36,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34851 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-15T11:46:36,726 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-15T11:46:36,726 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:46:36,726 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34851 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-15T11:46:36,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34851 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-15T11:46:36,727 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-15T11:46:36,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43663 is added to blk_1073741835_1011 (size=393) 2024-11-15T11:46:36,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45005 is added to blk_1073741835_1011 (size=393) 2024-11-15T11:46:36,736 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 89ebd913c1105ed0d73496ad67266f67, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731671196722.89ebd913c1105ed0d73496ad67266f67.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2 2024-11-15T11:46:36,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45005 is added to blk_1073741836_1012 (size=76) 2024-11-15T11:46:36,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43663 is added to blk_1073741836_1012 (size=76) 2024-11-15T11:46:36,824 INFO 
[RS:1;7adf9b3d9d04:39827 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7adf9b3d9d04%2C39827%2C1731671196494, suffix=, logDir=hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494, archiveDir=hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/oldWALs, maxLogs=32 2024-11-15T11:46:36,826 INFO [RS:1;7adf9b3d9d04:39827 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 2024-11-15T11:46:36,834 INFO [RS:1;7adf9b3d9d04:39827 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 2024-11-15T11:46:36,838 DEBUG [RS:1;7adf9b3d9d04:39827 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46173:46173),(127.0.0.1/127.0.0.1:33755:33755)] 2024-11-15T11:46:37,144 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731671196722.89ebd913c1105ed0d73496ad67266f67.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:46:37,144 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing 89ebd913c1105ed0d73496ad67266f67, disabling compactions & flushes 2024-11-15T11:46:37,144 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731671196722.89ebd913c1105ed0d73496ad67266f67. 2024-11-15T11:46:37,145 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731671196722.89ebd913c1105ed0d73496ad67266f67. 2024-11-15T11:46:37,145 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731671196722.89ebd913c1105ed0d73496ad67266f67. after waiting 0 ms 2024-11-15T11:46:37,145 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731671196722.89ebd913c1105ed0d73496ad67266f67. 2024-11-15T11:46:37,145 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731671196722.89ebd913c1105ed0d73496ad67266f67. 
2024-11-15T11:46:37,145 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for 89ebd913c1105ed0d73496ad67266f67: Waiting for close lock at 1731671197144Disabling compacts and flushes for region at 1731671197144Disabling writes for close at 1731671197145 (+1 ms)Writing region close event to WAL at 1731671197145Closed at 1731671197145 2024-11-15T11:46:37,146 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-15T11:46:37,147 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1731671196722.89ebd913c1105ed0d73496ad67266f67.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1731671197146"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731671197146"}]},"ts":"1731671197146"} 2024-11-15T11:46:37,149 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-15T11:46:37,151 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-15T11:46:37,151 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731671197151"}]},"ts":"1731671197151"} 2024-11-15T11:46:37,154 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-15T11:46:37,156 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {7adf9b3d9d04=0} racks are {/default-rack=0} 2024-11-15T11:46:37,161 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-15T11:46:37,161 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-15T11:46:37,161 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-15T11:46:37,161 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-15T11:46:37,161 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-15T11:46:37,161 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-15T11:46:37,161 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-15T11:46:37,162 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=89ebd913c1105ed0d73496ad67266f67, ASSIGN}] 2024-11-15T11:46:37,164 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=89ebd913c1105ed0d73496ad67266f67, ASSIGN 2024-11-15T11:46:37,165 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=89ebd913c1105ed0d73496ad67266f67, ASSIGN; state=OFFLINE, location=7adf9b3d9d04,46507,1731671195095; forceNewPlan=false, retain=false 2024-11-15T11:46:37,316 INFO [7adf9b3d9d04:34851 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-15T11:46:37,316 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=89ebd913c1105ed0d73496ad67266f67, regionState=OPENING, regionLocation=7adf9b3d9d04,46507,1731671195095 2024-11-15T11:46:37,320 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=89ebd913c1105ed0d73496ad67266f67, ASSIGN because future has completed 2024-11-15T11:46:37,320 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 89ebd913c1105ed0d73496ad67266f67, server=7adf9b3d9d04,46507,1731671195095}] 2024-11-15T11:46:37,478 INFO [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1731671196722.89ebd913c1105ed0d73496ad67266f67. 2024-11-15T11:46:37,478 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 89ebd913c1105ed0d73496ad67266f67, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731671196722.89ebd913c1105ed0d73496ad67266f67.', STARTKEY => '', ENDKEY => ''} 2024-11-15T11:46:37,479 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 89ebd913c1105ed0d73496ad67266f67 2024-11-15T11:46:37,479 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731671196722.89ebd913c1105ed0d73496ad67266f67.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:46:37,479 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 89ebd913c1105ed0d73496ad67266f67 2024-11-15T11:46:37,479 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 89ebd913c1105ed0d73496ad67266f67 2024-11-15T11:46:37,481 INFO [StoreOpener-89ebd913c1105ed0d73496ad67266f67-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 89ebd913c1105ed0d73496ad67266f67 2024-11-15T11:46:37,482 INFO [StoreOpener-89ebd913c1105ed0d73496ad67266f67-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 
604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 89ebd913c1105ed0d73496ad67266f67 columnFamilyName info 2024-11-15T11:46:37,483 DEBUG [StoreOpener-89ebd913c1105ed0d73496ad67266f67-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:46:37,483 INFO [StoreOpener-89ebd913c1105ed0d73496ad67266f67-1 {}] regionserver.HStore(327): Store=89ebd913c1105ed0d73496ad67266f67/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:46:37,483 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 89ebd913c1105ed0d73496ad67266f67 2024-11-15T11:46:37,484 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67 2024-11-15T11:46:37,485 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67 2024-11-15T11:46:37,485 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 89ebd913c1105ed0d73496ad67266f67 2024-11-15T11:46:37,485 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 89ebd913c1105ed0d73496ad67266f67 2024-11-15T11:46:37,487 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 89ebd913c1105ed0d73496ad67266f67 2024-11-15T11:46:37,490 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T11:46:37,490 INFO [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 89ebd913c1105ed0d73496ad67266f67; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=755460, jitterRate=-0.03938351571559906}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T11:46:37,490 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 89ebd913c1105ed0d73496ad67266f67 2024-11-15T11:46:37,491 DEBUG 
[RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 89ebd913c1105ed0d73496ad67266f67: Running coprocessor pre-open hook at 1731671197479Writing region info on filesystem at 1731671197479Initializing all the Stores at 1731671197481 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731671197481Cleaning up temporary data from old regions at 1731671197485 (+4 ms)Running coprocessor post-open hooks at 1731671197491 (+6 ms)Region opened successfully at 1731671197491 2024-11-15T11:46:37,492 INFO [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1731671196722.89ebd913c1105ed0d73496ad67266f67., pid=6, masterSystemTime=1731671197474 2024-11-15T11:46:37,495 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1731671196722.89ebd913c1105ed0d73496ad67266f67. 2024-11-15T11:46:37,495 INFO [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1731671196722.89ebd913c1105ed0d73496ad67266f67. 2024-11-15T11:46:37,496 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=89ebd913c1105ed0d73496ad67266f67, regionState=OPEN, openSeqNum=2, regionLocation=7adf9b3d9d04,46507,1731671195095 2024-11-15T11:46:37,499 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 89ebd913c1105ed0d73496ad67266f67, server=7adf9b3d9d04,46507,1731671195095 because future has completed 2024-11-15T11:46:37,503 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-15T11:46:37,503 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 89ebd913c1105ed0d73496ad67266f67, server=7adf9b3d9d04,46507,1731671195095 in 180 msec 2024-11-15T11:46:37,506 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-15T11:46:37,506 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=89ebd913c1105ed0d73496ad67266f67, ASSIGN in 341 msec 2024-11-15T11:46:37,507 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-15T11:46:37,507 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731671197507"}]},"ts":"1731671197507"} 2024-11-15T11:46:37,510 
INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-15T11:46:37,511 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-15T11:46:37,514 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 788 msec 2024-11-15T11:46:41,017 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-15T11:46:41,017 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-15T11:46:41,018 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-15T11:46:41,018 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-15T11:46:41,019 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T11:46:41,019 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-15T11:46:41,020 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-15T11:46:41,020 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-15T11:46:41,726 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-15T11:46:41,729 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:46:41,754 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:46:41,757 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:46:41,758 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:46:41,770 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-15T11:46:46,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34851 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-15T11:46:46,744 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-15T11:46:46,744 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-15T11:46:46,751 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-15T11:46:46,751 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1731671196722.89ebd913c1105ed0d73496ad67266f67. 2024-11-15T11:46:46,770 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T11:46:46,775 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T11:46:46,776 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T11:46:46,776 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T11:46:46,776 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T11:46:46,777 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4c68f920{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/hadoop.log.dir/,AVAILABLE} 2024-11-15T11:46:46,777 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@65349436{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T11:46:46,882 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@304af6f5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/java.io.tmpdir/jetty-localhost-39543-hadoop-hdfs-3_4_1-tests_jar-_-any-12787316915140889120/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T11:46:46,882 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@41559526{HTTP/1.1, (http/1.1)}{localhost:39543} 2024-11-15T11:46:46,883 INFO [Time-limited test {}] server.Server(415): Started @122636ms 2024-11-15T11:46:46,884 WARN [Time-limited test {}] 
web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T11:46:46,914 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T11:46:46,917 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T11:46:46,918 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T11:46:46,918 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T11:46:46,918 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T11:46:46,919 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3904e150{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/hadoop.log.dir/,AVAILABLE} 2024-11-15T11:46:46,919 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@514acf4b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T11:46:47,013 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4009f856{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/java.io.tmpdir/jetty-localhost-44693-hadoop-hdfs-3_4_1-tests_jar-_-any-6373447581964519049/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T11:46:47,013 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7b4117c9{HTTP/1.1, (http/1.1)}{localhost:44693} 2024-11-15T11:46:47,014 INFO [Time-limited test {}] server.Server(415): Started @122768ms 2024-11-15T11:46:47,015 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T11:46:47,059 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T11:46:47,062 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T11:46:47,063 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T11:46:47,063 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T11:46:47,063 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T11:46:47,063 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3e1ad43e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/hadoop.log.dir/,AVAILABLE} 2024-11-15T11:46:47,064 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@136e75a1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T11:46:47,157 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@638f230f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/java.io.tmpdir/jetty-localhost-37363-hadoop-hdfs-3_4_1-tests_jar-_-any-6609352068834141992/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T11:46:47,157 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@69bcca{HTTP/1.1, (http/1.1)}{localhost:37363} 2024-11-15T11:46:47,157 INFO [Time-limited test {}] server.Server(415): Started @122911ms 2024-11-15T11:46:47,159 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T11:46:47,722 WARN [Thread-868 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data5/current/BP-1235867767-172.17.0.2-1731671193006/current, will proceed with Du for space computation calculation, 2024-11-15T11:46:47,722 WARN [Thread-869 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data6/current/BP-1235867767-172.17.0.2-1731671193006/current, will proceed with Du for space computation calculation, 2024-11-15T11:46:47,741 WARN [Thread-810 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-15T11:46:47,743 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8d75f338ab112fd7 with lease ID 0x5b612b60ecdb631d: Processing first storage report for DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687 from datanode DatanodeRegistration(127.0.0.1:41789, datanodeUuid=c63d8502-ff64-4933-9fbb-ea5dca055eae, infoPort=43753, infoSecurePort=0, ipcPort=33433, storageInfo=lv=-57;cid=testClusterID;nsid=696059885;c=1731671193006) 2024-11-15T11:46:47,743 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8d75f338ab112fd7 with lease ID 0x5b612b60ecdb631d: from storage DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687 node DatanodeRegistration(127.0.0.1:41789, datanodeUuid=c63d8502-ff64-4933-9fbb-ea5dca055eae, infoPort=43753, infoSecurePort=0, ipcPort=33433, storageInfo=lv=-57;cid=testClusterID;nsid=696059885;c=1731671193006), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T11:46:47,743 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8d75f338ab112fd7 with lease ID 0x5b612b60ecdb631d: Processing first storage report for DS-24d7938e-51b0-412c-bc03-1ad27bfa100c from datanode DatanodeRegistration(127.0.0.1:41789, datanodeUuid=c63d8502-ff64-4933-9fbb-ea5dca055eae, infoPort=43753, infoSecurePort=0, ipcPort=33433, storageInfo=lv=-57;cid=testClusterID;nsid=696059885;c=1731671193006) 2024-11-15T11:46:47,743 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8d75f338ab112fd7 with lease ID 0x5b612b60ecdb631d: from storage DS-24d7938e-51b0-412c-bc03-1ad27bfa100c node DatanodeRegistration(127.0.0.1:41789, datanodeUuid=c63d8502-ff64-4933-9fbb-ea5dca055eae, infoPort=43753, infoSecurePort=0, ipcPort=33433, storageInfo=lv=-57;cid=testClusterID;nsid=696059885;c=1731671193006), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T11:46:47,856 WARN [Thread-880 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data7/current/BP-1235867767-172.17.0.2-1731671193006/current, will proceed with Du for space computation calculation, 2024-11-15T11:46:47,856 WARN [Thread-881 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data8/current/BP-1235867767-172.17.0.2-1731671193006/current, will proceed with Du for space computation calculation, 2024-11-15T11:46:47,874 WARN [Thread-832 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-15T11:46:47,876 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa41ba5e18e12310e with lease ID 0x5b612b60ecdb631e: Processing first storage report for DS-8223df78-bb6e-495e-8fa5-67ed2d3d6386 from datanode DatanodeRegistration(127.0.0.1:42575, datanodeUuid=7fe344e4-6b4d-4a1e-b778-675a417ac342, infoPort=34775, infoSecurePort=0, ipcPort=39005, storageInfo=lv=-57;cid=testClusterID;nsid=696059885;c=1731671193006) 2024-11-15T11:46:47,876 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa41ba5e18e12310e with lease ID 0x5b612b60ecdb631e: from storage DS-8223df78-bb6e-495e-8fa5-67ed2d3d6386 node DatanodeRegistration(127.0.0.1:42575, datanodeUuid=7fe344e4-6b4d-4a1e-b778-675a417ac342, infoPort=34775, infoSecurePort=0, ipcPort=39005, storageInfo=lv=-57;cid=testClusterID;nsid=696059885;c=1731671193006), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T11:46:47,876 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa41ba5e18e12310e with lease ID 0x5b612b60ecdb631e: Processing first storage report for DS-437e2d66-2dfe-4e59-8763-5206883e6928 from datanode DatanodeRegistration(127.0.0.1:42575, datanodeUuid=7fe344e4-6b4d-4a1e-b778-675a417ac342, infoPort=34775, infoSecurePort=0, ipcPort=39005, storageInfo=lv=-57;cid=testClusterID;nsid=696059885;c=1731671193006) 2024-11-15T11:46:47,876 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa41ba5e18e12310e with lease ID 0x5b612b60ecdb631e: from storage DS-437e2d66-2dfe-4e59-8763-5206883e6928 node DatanodeRegistration(127.0.0.1:42575, datanodeUuid=7fe344e4-6b4d-4a1e-b778-675a417ac342, infoPort=34775, infoSecurePort=0, ipcPort=39005, storageInfo=lv=-57;cid=testClusterID;nsid=696059885;c=1731671193006), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T11:46:47,958 WARN [Thread-891 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data9/current/BP-1235867767-172.17.0.2-1731671193006/current, will proceed with Du for space computation calculation, 2024-11-15T11:46:47,958 WARN [Thread-892 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data10/current/BP-1235867767-172.17.0.2-1731671193006/current, will proceed with Du for space computation calculation, 2024-11-15T11:46:47,980 WARN [Thread-854 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-15T11:46:47,982 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd1d9d7be3cc0e7ce with lease ID 0x5b612b60ecdb631f: Processing first storage report for DS-566e9c75-d247-4149-ba27-165b7c1bc5e2 from datanode DatanodeRegistration(127.0.0.1:41991, datanodeUuid=185575bb-4573-42fd-888e-d597e7952c60, infoPort=38247, infoSecurePort=0, ipcPort=40257, storageInfo=lv=-57;cid=testClusterID;nsid=696059885;c=1731671193006) 2024-11-15T11:46:47,982 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd1d9d7be3cc0e7ce with lease ID 0x5b612b60ecdb631f: from storage DS-566e9c75-d247-4149-ba27-165b7c1bc5e2 node DatanodeRegistration(127.0.0.1:41991, datanodeUuid=185575bb-4573-42fd-888e-d597e7952c60, infoPort=38247, infoSecurePort=0, ipcPort=40257, storageInfo=lv=-57;cid=testClusterID;nsid=696059885;c=1731671193006), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T11:46:47,982 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd1d9d7be3cc0e7ce with lease ID 0x5b612b60ecdb631f: Processing first storage report for DS-b09a4665-f5ac-40f3-b7d9-1318bd90f4e4 from datanode DatanodeRegistration(127.0.0.1:41991, datanodeUuid=185575bb-4573-42fd-888e-d597e7952c60, infoPort=38247, infoSecurePort=0, ipcPort=40257, storageInfo=lv=-57;cid=testClusterID;nsid=696059885;c=1731671193006) 2024-11-15T11:46:47,982 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd1d9d7be3cc0e7ce with lease ID 0x5b612b60ecdb631f: from storage DS-b09a4665-f5ac-40f3-b7d9-1318bd90f4e4 node DatanodeRegistration(127.0.0.1:41991, datanodeUuid=185575bb-4573-42fd-888e-d597e7952c60, infoPort=38247, infoSecurePort=0, ipcPort=40257, storageInfo=lv=-57;cid=testClusterID;nsid=696059885;c=1731671193006), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T11:46:47,985 WARN [ResponseProcessor for block BP-1235867767-172.17.0.2-1731671193006:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1235867767-172.17.0.2-1731671193006:blk_1073741837_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:47,985 WARN [ResponseProcessor for block BP-1235867767-172.17.0.2-1731671193006:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1235867767-172.17.0.2-1731671193006:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T11:46:47,985 WARN [ResponseProcessor for block BP-1235867767-172.17.0.2-1731671193006:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1235867767-172.17.0.2-1731671193006:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-1235867767-172.17.0.2-1731671193006:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:45005,DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:47,985 WARN [ResponseProcessor for block BP-1235867767-172.17.0.2-1731671193006:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1235867767-172.17.0.2-1731671193006:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-1235867767-172.17.0.2-1731671193006:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:45005,DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:47,985 WARN [DataStreamer for file /user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 block BP-1235867767-172.17.0.2-1731671193006:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45005,DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b,DISK], DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45005,DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b,DISK]) is bad. 2024-11-15T11:46:47,985 WARN [DataStreamer for file /user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.1731671195781 block BP-1235867767-172.17.0.2-1731671193006:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK], DatanodeInfoWithStorage[127.0.0.1:45005,DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45005,DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b,DISK]) is bad. 2024-11-15T11:46:47,986 WARN [DataStreamer for file /user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta block BP-1235867767-172.17.0.2-1731671193006:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK], DatanodeInfoWithStorage[127.0.0.1:45005,DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45005,DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b,DISK]) is bad. 
2024-11-15T11:46:47,986 WARN [DataStreamer for file /user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/MasterData/WALs/7adf9b3d9d04,34851,1731671194886/7adf9b3d9d04%2C34851%2C1731671194886.1731671195275 block BP-1235867767-172.17.0.2-1731671193006:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45005,DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b,DISK], DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45005,DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b,DISK]) is bad. 2024-11-15T11:46:47,985 WARN [PacketResponder: BP-1235867767-172.17.0.2-1731671193006:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:45005] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:46:47,986 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:40440 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:43663:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40440 dst: /127.0.0.1:43663 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:46:47,986 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-517491205_22 at /127.0.0.1:48326 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:45005:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48326 dst: /127.0.0.1:45005 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T11:46:47,987 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2073313263_22 at /127.0.0.1:40390 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:43663:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40390 dst: /127.0.0.1:43663 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:46:47,987 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2073313263_22 at /127.0.0.1:48272 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:45005:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48272 dst: /127.0.0.1:45005 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:46:47,986 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:40424 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:43663:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40424 dst: /127.0.0.1:43663 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:46:47,987 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:48288 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:45005:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48288 dst: /127.0.0.1:45005 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] 
at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:46:47,987 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-517491205_22 at /127.0.0.1:40460 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:43663:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40460 dst: /127.0.0.1:43663 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:46:47,987 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:48282 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:45005:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48282 dst: /127.0.0.1:45005 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:46:47,988 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7ae35b98{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T11:46:47,988 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@47d1c4f2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T11:46:47,989 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T11:46:47,989 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@57ebe64c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T11:46:47,989 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@10b53169{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/hadoop.log.dir/,STOPPED} 2024-11-15T11:46:47,989 WARN [BP-1235867767-172.17.0.2-1731671193006 heartbeating to localhost/127.0.0.1:40139 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T11:46:47,990 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-15T11:46:47,990 WARN [BP-1235867767-172.17.0.2-1731671193006 heartbeating to localhost/127.0.0.1:40139 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1235867767-172.17.0.2-1731671193006 (Datanode Uuid 053e9b0a-0ad1-4ae0-be32-c63262dd71be) service to localhost/127.0.0.1:40139 2024-11-15T11:46:47,990 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T11:46:47,990 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data3/current/BP-1235867767-172.17.0.2-1731671193006 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T11:46:47,990 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data4/current/BP-1235867767-172.17.0.2-1731671193006 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T11:46:47,990 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T11:46:47,991 WARN [DataStreamer for file /user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/MasterData/WALs/7adf9b3d9d04,34851,1731671194886/7adf9b3d9d04%2C34851%2C1731671194886.1731671195275 block BP-1235867767-172.17.0.2-1731671193006:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:47,992 WARN [DataStreamer for file /user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta block BP-1235867767-172.17.0.2-1731671193006:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:47,992 WARN [DataStreamer for file /user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.1731671195781 block BP-1235867767-172.17.0.2-1731671193006:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:47,993 WARN [DataStreamer for file /user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 block BP-1235867767-172.17.0.2-1731671193006:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:47,996 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3a66255b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T11:46:47,996 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1827207e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T11:46:47,996 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T11:46:47,996 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@180ba686{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T11:46:47,996 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3f87a993{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/hadoop.log.dir/,STOPPED} 2024-11-15T11:46:47,998 WARN [BP-1235867767-172.17.0.2-1731671193006 heartbeating to localhost/127.0.0.1:40139 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T11:46:47,998 WARN [BP-1235867767-172.17.0.2-1731671193006 heartbeating to localhost/127.0.0.1:40139 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1235867767-172.17.0.2-1731671193006 (Datanode Uuid 416b8e7b-471d-4895-ba54-25752c4128d5) service to localhost/127.0.0.1:40139 2024-11-15T11:46:47,998 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data1/current/BP-1235867767-172.17.0.2-1731671193006 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T11:46:47,998 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data2/current/BP-1235867767-172.17.0.2-1731671193006 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T11:46:47,999 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-15T11:46:47,999 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T11:46:47,999 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T11:46:48,002 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1731671196722.89ebd913c1105ed0d73496ad67266f67., hostname=7adf9b3d9d04,46507,1731671195095, seqNum=2] 2024-11-15T11:46:48,004 ERROR [FSHLog-0-hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2-prefix:7adf9b3d9d04,46507,1731671195095 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:48,004 WARN [FSHLog-0-hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2-prefix:7adf9b3d9d04,46507,1731671195095 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:48,004 DEBUG [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7adf9b3d9d04%2C46507%2C1731671195095:(num 1731671195781) roll requested 2024-11-15T11:46:48,005 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7adf9b3d9d04%2C46507%2C1731671195095.1731671208004 2024-11-15T11:46:48,007 WARN [Thread-902 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741838_1018 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:48,008 WARN [Thread-902 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK], DatanodeInfoWithStorage[127.0.0.1:42575,DS-8223df78-bb6e-495e-8fa5-67ed2d3d6386,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]) is bad. 2024-11-15T11:46:48,008 WARN [Thread-902 {}] hdfs.DataStreamer(1850): Abandoning BP-1235867767-172.17.0.2-1731671193006:blk_1073741838_1018 2024-11-15T11:46:48,010 WARN [Thread-902 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK] 2024-11-15T11:46:48,017 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:48,017 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:48,017 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:48,017 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:48,017 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:48,017 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.1731671195781 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.1731671208004 2024-11-15T11:46:48,018 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:48,018 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:48,019 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-15T11:46:48,019 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-15T11:46:48,019 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.1731671195781 2024-11-15T11:46:48,021 DEBUG [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34775:34775),(127.0.0.1/127.0.0.1:43753:43753)] 2024-11-15T11:46:48,021 DEBUG [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.1731671195781 is not closed yet, will try archiving it next time 2024-11-15T11:46:48,022 WARN [IPC Server handler 1 on default port 40139 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.1731671195781 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741833_1009 2024-11-15T11:46:48,026 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.1731671195781 after 5ms 2024-11-15T11:46:48,689 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:49,252 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:50,021 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:50,023 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.1731671208004 2024-11-15T11:46:50,024 WARN [ResponseProcessor for block BP-1235867767-172.17.0.2-1731671193006:blk_1073741839_1019 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1235867767-172.17.0.2-1731671193006:blk_1073741839_1019 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:50,025 WARN [DataStreamer for file /user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.1731671208004 block BP-1235867767-172.17.0.2-1731671193006:blk_1073741839_1019 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741839_1019 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42575,DS-8223df78-bb6e-495e-8fa5-67ed2d3d6386,DISK], DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42575,DS-8223df78-bb6e-495e-8fa5-67ed2d3d6386,DISK]) is bad. 2024-11-15T11:46:50,025 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:53348 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741839_1019] {}] datanode.DataXceiver(331): 127.0.0.1:42575:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53348 dst: /127.0.0.1:42575 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:46:50,026 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:53596 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741839_1019] {}] datanode.DataXceiver(331): 127.0.0.1:41789:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53596 dst: /127.0.0.1:41789 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
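The repeated "java.io.IOException: All datanodes [...] are bad. Aborting..." entries above are the HDFS client's DataStreamer giving up on a write pipeline once every datanode in it has been excluded, and the DataXceiver WRITE_BLOCK errors are the server side of the same pipeline collapsing as datanodes are stopped. How hard the client tries to replace a failed pipeline node is governed by the dfs.client.block.write.replace-datanode-on-failure.* settings. The sketch below is a hypothetical client configuration for a tiny test cluster (the NameNode address is the one printed in the log; everything else is an assumption), not the configuration this test actually uses.

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    public class LenientPipelineClient {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // Keep writing on the surviving datanodes instead of demanding a
            // replacement when a pipeline node dies; only sensible for tiny
            // test clusters, production clusters normally keep the default.
            conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
            conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");

            // NameNode address as printed in the log above.
            try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:40139"), conf)) {
                System.out.println("default replication: " + fs.getDefaultReplication());
            }
        }
    }

With the policy set to NEVER the pipeline is allowed to shrink, so a single surviving datanode keeps the stream alive instead of triggering the abort seen above; the trade-off is reduced durability while the pipeline is degraded.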
2024-11-15T11:46:50,065 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4009f856{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T11:46:50,066 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7b4117c9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T11:46:50,066 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T11:46:50,066 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@514acf4b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T11:46:50,066 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3904e150{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/hadoop.log.dir/,STOPPED} 2024-11-15T11:46:50,069 WARN [BP-1235867767-172.17.0.2-1731671193006 heartbeating to localhost/127.0.0.1:40139 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T11:46:50,069 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-15T11:46:50,069 WARN [BP-1235867767-172.17.0.2-1731671193006 heartbeating to localhost/127.0.0.1:40139 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1235867767-172.17.0.2-1731671193006 (Datanode Uuid 7fe344e4-6b4d-4a1e-b778-675a417ac342) service to localhost/127.0.0.1:40139 2024-11-15T11:46:50,069 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T11:46:50,070 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data7/current/BP-1235867767-172.17.0.2-1731671193006 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T11:46:50,070 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data8/current/BP-1235867767-172.17.0.2-1731671193006 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T11:46:50,070 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T11:46:50,690 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:51,252 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:52,022 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:52,023 WARN [regionserver/7adf9b3d9d04:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK]] 2024-11-15T11:46:52,023 DEBUG [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7adf9b3d9d04%2C46507%2C1731671195095:(num 1731671208004) roll requested 2024-11-15T11:46:52,023 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7adf9b3d9d04%2C46507%2C1731671195095.1731671212023 2024-11-15T11:46:52,027 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.1731671195781 after 4007ms 2024-11-15T11:46:52,027 WARN [Thread-911 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:52,027 WARN [Thread-911 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42575,DS-8223df78-bb6e-495e-8fa5-67ed2d3d6386,DISK], DatanodeInfoWithStorage[127.0.0.1:45005,DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42575,DS-8223df78-bb6e-495e-8fa5-67ed2d3d6386,DISK]) is bad. 2024-11-15T11:46:52,027 WARN [Thread-911 {}] hdfs.DataStreamer(1850): Abandoning BP-1235867767-172.17.0.2-1731671193006:blk_1073741840_1022 2024-11-15T11:46:52,028 WARN [Thread-911 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42575,DS-8223df78-bb6e-495e-8fa5-67ed2d3d6386,DISK] 2024-11-15T11:46:52,029 WARN [Thread-911 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:52,029 WARN [Thread-911 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK], DatanodeInfoWithStorage[127.0.0.1:45005,DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]) is bad. 2024-11-15T11:46:52,029 WARN [Thread-911 {}] hdfs.DataStreamer(1850): Abandoning BP-1235867767-172.17.0.2-1731671193006:blk_1073741841_1023 2024-11-15T11:46:52,030 WARN [Thread-911 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK] 2024-11-15T11:46:52,032 WARN [Thread-911 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1024 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45005 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:52,032 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:33712 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741842_1024] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data10]'}, localName='127.0.0.1:41991', datanodeUuid='185575bb-4573-42fd-888e-d597e7952c60', xmitsInProgress=0}:Exception transferring block BP-1235867767-172.17.0.2-1731671193006:blk_1073741842_1024 to mirror 127.0.0.1:45005 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:46:52,032 WARN [Thread-911 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41991,DS-566e9c75-d247-4149-ba27-165b7c1bc5e2,DISK], DatanodeInfoWithStorage[127.0.0.1:45005,DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45005,DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b,DISK]) is bad. 2024-11-15T11:46:52,032 WARN [Thread-911 {}] hdfs.DataStreamer(1850): Abandoning BP-1235867767-172.17.0.2-1731671193006:blk_1073741842_1024 2024-11-15T11:46:52,032 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:33712 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741842_1024] {}] datanode.BlockReceiver(316): Block 1073741842 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-15T11:46:52,033 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:33712 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:41991:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33712 dst: /127.0.0.1:41991 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:46:52,033 WARN [Thread-911 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45005,DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b,DISK] 2024-11-15T11:46:52,040 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:52,040 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:52,040 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:52,040 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:52,040 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:52,041 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.1731671208004 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.1731671212023 2024-11-15T11:46:52,046 DEBUG [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38247:38247),(127.0.0.1/127.0.0.1:43753:43753)] 2024-11-15T11:46:52,047 DEBUG [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.1731671195781 is not closed yet, will try archiving it next time 2024-11-15T11:46:52,047 DEBUG [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.1731671208004 is not closed yet, will try archiving it next time 2024-11-15T11:46:52,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41789 is added to blk_1073741839_1021 (size=2431) 2024-11-15T11:46:52,076 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T11:46:52,449 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.1731671195781 is not closed yet, will try archiving it next time 2024-11-15T11:46:52,690 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes 
[DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:53,253 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:54,047 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:54,080 WARN [ResponseProcessor for block BP-1235867767-172.17.0.2-1731671193006:blk_1073741843_1025 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1235867767-172.17.0.2-1731671193006:blk_1073741843_1025 java.io.IOException: Bad response ERROR for BP-1235867767-172.17.0.2-1731671193006:blk_1073741843_1025 from datanode DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
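The RecoverLeaseFSUtils entries above show the close path for the old WAL: HBase asks the NameNode to recover the lease on ...1731671195781, the NameNode answers that "Lease recovery is in progress", and the helper retries (attempt=0 after 5ms, attempt=1 after 4007ms) after resolving recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease(). The same loop can be written directly against DistributedFileSystem; the sketch below is a simplified stand-in for that helper (the file path is passed in as an argument, the sleep roughly echoes the timings in the log), not the RecoverLeaseFSUtils implementation itself.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoveryLoop {
        /** Ask the NameNode to recover the lease on an HDFS file and wait until it is closed. */
        static boolean recoverAndWait(DistributedFileSystem dfs, Path file, long timeoutMs)
                throws Exception {
            long deadline = System.currentTimeMillis() + timeoutMs;
            while (System.currentTimeMillis() < deadline) {
                // recoverLease() returns true once the file is closed and its lease released.
                if (dfs.recoverLease(file)) {
                    return true;
                }
                // isFileClosed() notices completion without starting another recovery round.
                if (dfs.isFileClosed(file)) {
                    return true;
                }
                Thread.sleep(4000L); // back off between attempts, roughly as the log shows
            }
            return false;
        }

        public static void main(String[] args) throws Exception {
            Path wal = new Path(args[0]); // e.g. the old WAL path printed by RecoverLeaseFSUtils
            try (FileSystem fs = FileSystem.get(wal.toUri(), new Configuration())) {
                System.out.println("closed: " + recoverAndWait((DistributedFileSystem) fs, wal, 60_000L));
            }
        }
    }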
2024-11-15T11:46:54,081 WARN [DataStreamer for file /user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.1731671212023 block BP-1235867767-172.17.0.2-1731671193006:blk_1073741843_1025 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741843_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41991,DS-566e9c75-d247-4149-ba27-165b7c1bc5e2,DISK], DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK]) is bad. 2024-11-15T11:46:54,081 WARN [PacketResponder: BP-1235867767-172.17.0.2-1731671193006:blk_1073741843_1025, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:41789] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:46:54,081 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:33724 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741843_1025] {}] datanode.DataXceiver(331): 127.0.0.1:41991:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33724 dst: /127.0.0.1:41991 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:46:54,081 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:45324 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741843_1025] {}] datanode.DataXceiver(331): 127.0.0.1:41789:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45324 dst: /127.0.0.1:41789 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
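The logRoller thread drives the rolls seen above on its own ("roll requested", then "Rolled WAL ... with entries=2 ...; new WAL ...1731671212023"), and the DataStreamer/DataXceiver errors that follow belong to the freshly opened WAL block blk_1073741843. A roll can also be forced from the client side. The sketch below uses Admin.rollWALWriter; it assumes a reasonably recent HBase client API and a default client configuration and is not part of this test.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ForceWalRoll {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Ask every region server to close its current WAL file and open a
                // new one, which is what the logRoller thread above does on its own
                // schedule or when it detects a degraded pipeline.
                for (ServerName server : admin.getRegionServers()) {
                    admin.rollWALWriter(server);
                }
            }
        }
    }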
2024-11-15T11:46:54,121 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@304af6f5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T11:46:54,122 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@41559526{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T11:46:54,122 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T11:46:54,122 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@65349436{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T11:46:54,122 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4c68f920{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/hadoop.log.dir/,STOPPED} 2024-11-15T11:46:54,123 WARN [BP-1235867767-172.17.0.2-1731671193006 heartbeating to localhost/127.0.0.1:40139 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T11:46:54,123 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-15T11:46:54,123 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T11:46:54,123 WARN [BP-1235867767-172.17.0.2-1731671193006 heartbeating to localhost/127.0.0.1:40139 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1235867767-172.17.0.2-1731671193006 (Datanode Uuid c63d8502-ff64-4933-9fbb-ea5dca055eae) service to localhost/127.0.0.1:40139 2024-11-15T11:46:54,124 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data5/current/BP-1235867767-172.17.0.2-1731671193006 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T11:46:54,124 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data6/current/BP-1235867767-172.17.0.2-1731671193006 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T11:46:54,124 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T11:46:54,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46507 {}] regionserver.HRegion(8855): Flush requested on 89ebd913c1105ed0d73496ad67266f67 2024-11-15T11:46:54,133 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 89ebd913c1105ed0d73496ad67266f67 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-15T11:46:54,153 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/.tmp/info/fadb30125a4b425b89f23c32ef91fc18 is 1080, key is row0002/info:/1731671210072/Put/seqid=0 2024-11-15T11:46:54,154 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:54,154 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42575,DS-8223df78-bb6e-495e-8fa5-67ed2d3d6386,DISK], DatanodeInfoWithStorage[127.0.0.1:45005,DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42575,DS-8223df78-bb6e-495e-8fa5-67ed2d3d6386,DISK]) is bad. 2024-11-15T11:46:54,154 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-1235867767-172.17.0.2-1731671193006:blk_1073741844_1027 2024-11-15T11:46:54,155 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42575,DS-8223df78-bb6e-495e-8fa5-67ed2d3d6386,DISK] 2024-11-15T11:46:54,156 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:54,156 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45005,DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b,DISK], DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45005,DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b,DISK]) is bad. 
2024-11-15T11:46:54,156 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-1235867767-172.17.0.2-1731671193006:blk_1073741845_1028 2024-11-15T11:46:54,157 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45005,DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b,DISK] 2024-11-15T11:46:54,159 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43663 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:54,159 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:33742 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741846_1029] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data10]'}, localName='127.0.0.1:41991', datanodeUuid='185575bb-4573-42fd-888e-d597e7952c60', xmitsInProgress=0}:Exception transferring block BP-1235867767-172.17.0.2-1731671193006:blk_1073741846_1029 to mirror 127.0.0.1:43663 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:46:54,159 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41991,DS-566e9c75-d247-4149-ba27-165b7c1bc5e2,DISK], DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]) is bad. 
2024-11-15T11:46:54,159 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-1235867767-172.17.0.2-1731671193006:blk_1073741846_1029 2024-11-15T11:46:54,159 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:33742 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741846_1029] {}] datanode.BlockReceiver(316): Block 1073741846 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-15T11:46:54,159 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:33742 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741846_1029] {}] datanode.DataXceiver(331): 127.0.0.1:41991:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33742 dst: /127.0.0.1:41991 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:46:54,160 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK] 2024-11-15T11:46:54,161 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:54,161 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK], DatanodeInfoWithStorage[127.0.0.1:41991,DS-566e9c75-d247-4149-ba27-165b7c1bc5e2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK]) is bad. 
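By this point the flusher's DataStreamer has abandoned blocks after being pointed at 127.0.0.1:42575, 127.0.0.1:45005, 127.0.0.1:43663 and 127.0.0.1:41789 in turn, which is easier to follow next to a report of which datanodes the NameNode still considers live. The sketch below is a hypothetical diagnostic, not something the test runs; it only assumes the NameNode address shown in the log.

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;

    public class LiveDatanodeReport {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:40139"), conf)) {
                DistributedFileSystem dfs = (DistributedFileSystem) fs;
                // LIVE vs DEAD is the view BlockPlacementPolicy works from when it
                // warns "Failed to place enough replicas" further down.
                for (DatanodeInfo dn : dfs.getDataNodeStats(DatanodeReportType.LIVE)) {
                    System.out.println("LIVE " + dn.getXferAddr());
                }
                for (DatanodeInfo dn : dfs.getDataNodeStats(DatanodeReportType.DEAD)) {
                    System.out.println("DEAD " + dn.getXferAddr());
                }
            }
        }
    }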
2024-11-15T11:46:54,161 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-1235867767-172.17.0.2-1731671193006:blk_1073741847_1030 2024-11-15T11:46:54,162 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK] 2024-11-15T11:46:54,163 WARN [IPC Server handler 2 on default port 40139 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-15T11:46:54,163 WARN [IPC Server handler 2 on default port 40139 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-15T11:46:54,163 WARN [IPC Server handler 2 on default port 40139 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-15T11:46:54,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41991 is added to blk_1073741848_1031 (size=10347) 2024-11-15T11:46:54,567 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/.tmp/info/fadb30125a4b425b89f23c32ef91fc18 2024-11-15T11:46:54,579 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/.tmp/info/fadb30125a4b425b89f23c32ef91fc18 as hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/fadb30125a4b425b89f23c32ef91fc18 2024-11-15T11:46:54,587 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/fadb30125a4b425b89f23c32ef91fc18, entries=5, sequenceid=11, filesize=10.1 K 2024-11-15T11:46:54,588 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for 89ebd913c1105ed0d73496ad67266f67 in 454ms, sequenceid=11, compaction requested=false 2024-11-15T11:46:54,588 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
89ebd913c1105ed0d73496ad67266f67: 2024-11-15T11:46:54,691 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:54,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46507 {}] regionserver.HRegion(8855): Flush requested on 89ebd913c1105ed0d73496ad67266f67 2024-11-15T11:46:54,759 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 89ebd913c1105ed0d73496ad67266f67 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-15T11:46:54,768 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/.tmp/info/ba263eb48e5e4f4ebe122cb875f3fdd8 is 1080, key is row0007/info:/1731671214134/Put/seqid=0 2024-11-15T11:46:54,771 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:33768 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741849_1032] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data10]'}, localName='127.0.0.1:41991', datanodeUuid='185575bb-4573-42fd-888e-d597e7952c60', xmitsInProgress=0}:Exception transferring block BP-1235867767-172.17.0.2-1731671193006:blk_1073741849_1032 to mirror 127.0.0.1:43663 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
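The flush above writes the memstore of region 89ebd913c1105ed0d73496ad67266f67 to a temporary HFile under .tmp/info/, commits it into info/ (entries=5, sequenceid=11, filesize=10.1 K), and reports the largest cell with the usual row/family:qualifier/timestamp/type key layout ("row0002/info:/1731671210072/Put/seqid=0", note the empty qualifier). A client-side sketch of producing that kind of data and requesting a flush is shown below; the table name comes from the log, the row contents are placeholders, and this is not the test's own write path.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriteAndFlush {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            TableName tn = TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath");
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(tn);
                 Admin admin = conn.getAdmin()) {
                // Each Put becomes a cell keyed as row/family:qualifier/timestamp/Put,
                // the shape HFileWriterImpl reports above (family "info", empty qualifier).
                for (int i = 0; i < 5; i++) {
                    Put put = new Put(Bytes.toBytes(String.format("row%04d", i)));
                    put.addColumn(Bytes.toBytes("info"), Bytes.toBytes(""), Bytes.toBytes("value" + i));
                    table.put(put);
                }
                // Force the memstore to be written out as an HFile, as MemStoreFlusher does above.
                admin.flush(tn);
            }
        }
    }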
2024-11-15T11:46:54,771 WARN [Thread-927 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43663 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:54,771 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:33768 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741849_1032] {}] datanode.BlockReceiver(316): Block 1073741849 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-15T11:46:54,771 WARN [Thread-927 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41991,DS-566e9c75-d247-4149-ba27-165b7c1bc5e2,DISK], DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]) is bad. 2024-11-15T11:46:54,771 WARN [Thread-927 {}] hdfs.DataStreamer(1850): Abandoning BP-1235867767-172.17.0.2-1731671193006:blk_1073741849_1032 2024-11-15T11:46:54,771 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:33768 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741849_1032] {}] datanode.DataXceiver(331): 127.0.0.1:41991:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33768 dst: /127.0.0.1:41991 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T11:46:54,772 WARN [Thread-927 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK] 2024-11-15T11:46:54,774 WARN [Thread-927 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41789 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:54,774 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:33782 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741850_1033] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data10]'}, localName='127.0.0.1:41991', datanodeUuid='185575bb-4573-42fd-888e-d597e7952c60', xmitsInProgress=0}:Exception transferring block BP-1235867767-172.17.0.2-1731671193006:blk_1073741850_1033 to mirror 127.0.0.1:41789 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:46:54,774 WARN [Thread-927 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41991,DS-566e9c75-d247-4149-ba27-165b7c1bc5e2,DISK], DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK]) is bad. 
2024-11-15T11:46:54,774 WARN [Thread-927 {}] hdfs.DataStreamer(1850): Abandoning BP-1235867767-172.17.0.2-1731671193006:blk_1073741850_1033 2024-11-15T11:46:54,775 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:33782 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741850_1033] {}] datanode.BlockReceiver(316): Block 1073741850 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-15T11:46:54,775 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:33782 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741850_1033] {}] datanode.DataXceiver(331): 127.0.0.1:41991:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33782 dst: /127.0.0.1:41991 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:46:54,775 WARN [Thread-927 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK] 2024-11-15T11:46:54,777 WARN [Thread-927 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:54,777 WARN [Thread-927 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45005,DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b,DISK], DatanodeInfoWithStorage[127.0.0.1:42575,DS-8223df78-bb6e-495e-8fa5-67ed2d3d6386,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45005,DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b,DISK]) is bad. 
2024-11-15T11:46:54,777 WARN [Thread-927 {}] hdfs.DataStreamer(1850): Abandoning BP-1235867767-172.17.0.2-1731671193006:blk_1073741851_1034 2024-11-15T11:46:54,777 WARN [Thread-927 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45005,DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b,DISK] 2024-11-15T11:46:54,779 WARN [Thread-927 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:54,779 WARN [Thread-927 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42575,DS-8223df78-bb6e-495e-8fa5-67ed2d3d6386,DISK], DatanodeInfoWithStorage[127.0.0.1:41991,DS-566e9c75-d247-4149-ba27-165b7c1bc5e2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42575,DS-8223df78-bb6e-495e-8fa5-67ed2d3d6386,DISK]) is bad. 
2024-11-15T11:46:54,779 WARN [Thread-927 {}] hdfs.DataStreamer(1850): Abandoning BP-1235867767-172.17.0.2-1731671193006:blk_1073741852_1035 2024-11-15T11:46:54,779 WARN [Thread-927 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42575,DS-8223df78-bb6e-495e-8fa5-67ed2d3d6386,DISK] 2024-11-15T11:46:54,780 WARN [IPC Server handler 2 on default port 40139 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-15T11:46:54,780 WARN [IPC Server handler 2 on default port 40139 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-15T11:46:54,780 WARN [IPC Server handler 2 on default port 40139 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-15T11:46:54,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41991 is added to blk_1073741853_1036 (size=12506) 2024-11-15T11:46:54,992 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@20630d1e[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:41991, datanodeUuid=185575bb-4573-42fd-888e-d597e7952c60, infoPort=38247, infoSecurePort=0, ipcPort=40257, storageInfo=lv=-57;cid=testClusterID;nsid=696059885;c=1731671193006):Failed to transfer BP-1235867767-172.17.0.2-1731671193006:blk_1073741848_1031 to 127.0.0.1:41789 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T11:46:55,184 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/.tmp/info/ba263eb48e5e4f4ebe122cb875f3fdd8 2024-11-15T11:46:55,191 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/.tmp/info/ba263eb48e5e4f4ebe122cb875f3fdd8 as hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/ba263eb48e5e4f4ebe122cb875f3fdd8 2024-11-15T11:46:55,199 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/ba263eb48e5e4f4ebe122cb875f3fdd8, entries=7, sequenceid=24, filesize=12.2 K 2024-11-15T11:46:55,200 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for 89ebd913c1105ed0d73496ad67266f67 in 441ms, sequenceid=24, compaction requested=false 2024-11-15T11:46:55,200 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 89ebd913c1105ed0d73496ad67266f67: 2024-11-15T11:46:55,201 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-15T11:46:55,201 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T11:46:55,201 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/ba263eb48e5e4f4ebe122cb875f3fdd8 because midkey is the same as first or last row 2024-11-15T11:46:55,253 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:56,047 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:56,048 WARN [regionserver/7adf9b3d9d04:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41991,DS-566e9c75-d247-4149-ba27-165b7c1bc5e2,DISK]] 2024-11-15T11:46:56,048 DEBUG [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7adf9b3d9d04%2C46507%2C1731671195095:(num 1731671212023) roll requested 2024-11-15T11:46:56,049 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7adf9b3d9d04%2C46507%2C1731671195095.1731671216048 2024-11-15T11:46:56,055 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:56,056 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42575,DS-8223df78-bb6e-495e-8fa5-67ed2d3d6386,DISK], DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42575,DS-8223df78-bb6e-495e-8fa5-67ed2d3d6386,DISK]) is bad. 2024-11-15T11:46:56,056 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-1235867767-172.17.0.2-1731671193006:blk_1073741854_1037 2024-11-15T11:46:56,057 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42575,DS-8223df78-bb6e-495e-8fa5-67ed2d3d6386,DISK] 2024-11-15T11:46:56,061 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43663 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:56,061 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:33808 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741855_1038] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data10]'}, localName='127.0.0.1:41991', datanodeUuid='185575bb-4573-42fd-888e-d597e7952c60', xmitsInProgress=0}:Exception transferring block BP-1235867767-172.17.0.2-1731671193006:blk_1073741855_1038 to mirror 127.0.0.1:43663 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:46:56,061 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41991,DS-566e9c75-d247-4149-ba27-165b7c1bc5e2,DISK], DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]) is bad. 2024-11-15T11:46:56,061 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-1235867767-172.17.0.2-1731671193006:blk_1073741855_1038 2024-11-15T11:46:56,061 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:33808 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741855_1038] {}] datanode.BlockReceiver(316): Block 1073741855 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-15T11:46:56,061 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:33808 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741855_1038] {}] datanode.DataXceiver(331): 127.0.0.1:41991:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33808 dst: /127.0.0.1:41991 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:46:56,062 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK] 2024-11-15T11:46:56,064 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:56,065 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK], DatanodeInfoWithStorage[127.0.0.1:45005,DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK]) is bad. 2024-11-15T11:46:56,065 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-1235867767-172.17.0.2-1731671193006:blk_1073741856_1039 2024-11-15T11:46:56,065 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK] 2024-11-15T11:46:56,068 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45005 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:56,068 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:33818 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741857_1040] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data10]'}, localName='127.0.0.1:41991', datanodeUuid='185575bb-4573-42fd-888e-d597e7952c60', xmitsInProgress=0}:Exception transferring block BP-1235867767-172.17.0.2-1731671193006:blk_1073741857_1040 to mirror 127.0.0.1:45005 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:46:56,068 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41991,DS-566e9c75-d247-4149-ba27-165b7c1bc5e2,DISK], DatanodeInfoWithStorage[127.0.0.1:45005,DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45005,DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b,DISK]) is bad. 2024-11-15T11:46:56,068 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-1235867767-172.17.0.2-1731671193006:blk_1073741857_1040 2024-11-15T11:46:56,068 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:33818 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741857_1040] {}] datanode.BlockReceiver(316): Block 1073741857 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-15T11:46:56,068 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:33818 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741857_1040] {}] datanode.DataXceiver(331): 127.0.0.1:41991:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33818 dst: /127.0.0.1:41991 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:46:56,069 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45005,DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b,DISK] 2024-11-15T11:46:56,069 WARN [IPC Server handler 0 on default port 40139 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-15T11:46:56,069 WARN [IPC Server handler 0 on default port 40139 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-15T11:46:56,070 WARN [IPC Server handler 0 on default port 40139 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-15T11:46:56,072 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:56,072 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:56,072 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:56,073 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:56,073 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:56,073 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.1731671212023 with entries=25, filesize=25.38 KB; new WAL /user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.1731671216048 2024-11-15T11:46:56,074 DEBUG [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38247:38247)] 2024-11-15T11:46:56,074 DEBUG [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractFSWAL(879): 
hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.1731671195781 is not closed yet, will try archiving it next time 2024-11-15T11:46:56,074 DEBUG [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.1731671212023 is not closed yet, will try archiving it next time 2024-11-15T11:46:56,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41991 is added to blk_1073741843_1026 (size=25992) 2024-11-15T11:46:56,075 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.1731671208004 to hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/oldWALs/7adf9b3d9d04%2C46507%2C1731671195095.1731671208004 2024-11-15T11:46:56,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46507 {}] regionserver.HRegion(8855): Flush requested on 89ebd913c1105ed0d73496ad67266f67 2024-11-15T11:46:56,189 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 89ebd913c1105ed0d73496ad67266f67 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-15T11:46:56,194 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/.tmp/info/fb8139e34d984e88978400d19e28e64a is 1079, key is tmprow/info:/1731671216188/Put/seqid=0 2024-11-15T11:46:56,195 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:56,195 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42575,DS-8223df78-bb6e-495e-8fa5-67ed2d3d6386,DISK], DatanodeInfoWithStorage[127.0.0.1:41991,DS-566e9c75-d247-4149-ba27-165b7c1bc5e2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42575,DS-8223df78-bb6e-495e-8fa5-67ed2d3d6386,DISK]) is bad. 
2024-11-15T11:46:56,195 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-1235867767-172.17.0.2-1731671193006:blk_1073741859_1042 2024-11-15T11:46:56,196 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42575,DS-8223df78-bb6e-495e-8fa5-67ed2d3d6386,DISK] 2024-11-15T11:46:56,197 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:56,197 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK], DatanodeInfoWithStorage[127.0.0.1:45005,DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK]) is bad. 2024-11-15T11:46:56,197 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-1235867767-172.17.0.2-1731671193006:blk_1073741860_1043 2024-11-15T11:46:56,198 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK] 2024-11-15T11:46:56,199 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43663 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T11:46:56,199 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:33836 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741861_1044] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data10]'}, localName='127.0.0.1:41991', datanodeUuid='185575bb-4573-42fd-888e-d597e7952c60', xmitsInProgress=0}:Exception transferring block BP-1235867767-172.17.0.2-1731671193006:blk_1073741861_1044 to mirror 127.0.0.1:43663 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:46:56,200 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41991,DS-566e9c75-d247-4149-ba27-165b7c1bc5e2,DISK], DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]) is bad. 2024-11-15T11:46:56,200 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-1235867767-172.17.0.2-1731671193006:blk_1073741861_1044 2024-11-15T11:46:56,200 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:33836 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741861_1044] {}] datanode.BlockReceiver(316): Block 1073741861 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-15T11:46:56,200 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:33836 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741861_1044] {}] datanode.DataXceiver(331): 127.0.0.1:41991:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33836 dst: /127.0.0.1:41991 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:46:56,200 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK] 2024-11-15T11:46:56,202 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45005 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:56,202 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:33844 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741862_1045] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data10]'}, localName='127.0.0.1:41991', datanodeUuid='185575bb-4573-42fd-888e-d597e7952c60', xmitsInProgress=0}:Exception transferring block BP-1235867767-172.17.0.2-1731671193006:blk_1073741862_1045 to mirror 127.0.0.1:45005 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T11:46:56,202 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41991,DS-566e9c75-d247-4149-ba27-165b7c1bc5e2,DISK], DatanodeInfoWithStorage[127.0.0.1:45005,DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45005,DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b,DISK]) is bad. 2024-11-15T11:46:56,203 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-1235867767-172.17.0.2-1731671193006:blk_1073741862_1045 2024-11-15T11:46:56,203 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:33844 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741862_1045] {}] datanode.BlockReceiver(316): Block 1073741862 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-15T11:46:56,203 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:33844 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741862_1045] {}] datanode.DataXceiver(331): 127.0.0.1:41991:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33844 dst: /127.0.0.1:41991 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T11:46:56,203 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45005,DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b,DISK] 2024-11-15T11:46:56,204 WARN [IPC Server handler 3 on default port 40139 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-15T11:46:56,204 WARN [IPC Server handler 3 on default port 40139 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-15T11:46:56,204 WARN [IPC Server handler 3 on default port 40139 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-15T11:46:56,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41991 is added to blk_1073741863_1046 (size=6027) 2024-11-15T11:46:56,476 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.1731671195781 is not closed yet, will try archiving it next time 2024-11-15T11:46:56,608 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/.tmp/info/fb8139e34d984e88978400d19e28e64a 2024-11-15T11:46:56,617 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/.tmp/info/fb8139e34d984e88978400d19e28e64a as hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/fb8139e34d984e88978400d19e28e64a 2024-11-15T11:46:56,627 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/fb8139e34d984e88978400d19e28e64a, entries=1, sequenceid=34, filesize=5.9 K 2024-11-15T11:46:56,628 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 89ebd913c1105ed0d73496ad67266f67 in 439ms, 
sequenceid=34, compaction requested=true 2024-11-15T11:46:56,628 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 89ebd913c1105ed0d73496ad67266f67: 2024-11-15T11:46:56,628 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-15T11:46:56,628 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T11:46:56,629 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/ba263eb48e5e4f4ebe122cb875f3fdd8 because midkey is the same as first or last row 2024-11-15T11:46:56,629 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 89ebd913c1105ed0d73496ad67266f67:info, priority=-2147483648, current under compaction store size is 1 2024-11-15T11:46:56,629 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T11:46:56,629 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-15T11:46:56,631 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-15T11:46:56,631 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.HStore(1541): 89ebd913c1105ed0d73496ad67266f67/info is initiating minor compaction (all files) 2024-11-15T11:46:56,631 INFO [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 89ebd913c1105ed0d73496ad67266f67/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731671196722.89ebd913c1105ed0d73496ad67266f67. 
2024-11-15T11:46:56,632 INFO [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/fadb30125a4b425b89f23c32ef91fc18, hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/ba263eb48e5e4f4ebe122cb875f3fdd8, hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/fb8139e34d984e88978400d19e28e64a] into tmpdir=hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/.tmp, totalSize=28.2 K 2024-11-15T11:46:56,632 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] compactions.Compactor(225): Compacting fadb30125a4b425b89f23c32ef91fc18, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731671210072 2024-11-15T11:46:56,633 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] compactions.Compactor(225): Compacting ba263eb48e5e4f4ebe122cb875f3fdd8, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1731671214134 2024-11-15T11:46:56,633 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] compactions.Compactor(225): Compacting fb8139e34d984e88978400d19e28e64a, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731671216188 2024-11-15T11:46:56,650 INFO [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 89ebd913c1105ed0d73496ad67266f67#info#compaction#21 average throughput is 4.10 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-15T11:46:56,651 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/.tmp/info/8c03a83727a8433f9404ef82013baee0 is 1080, key is row0002/info:/1731671210072/Put/seqid=0 2024-11-15T11:46:56,653 WARN [Thread-949 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T11:46:56,653 WARN [Thread-949 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK], DatanodeInfoWithStorage[127.0.0.1:41991,DS-566e9c75-d247-4149-ba27-165b7c1bc5e2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK]) is bad. 2024-11-15T11:46:56,653 WARN [Thread-949 {}] hdfs.DataStreamer(1850): Abandoning BP-1235867767-172.17.0.2-1731671193006:blk_1073741864_1047 2024-11-15T11:46:56,654 WARN [Thread-949 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK] 2024-11-15T11:46:56,655 WARN [Thread-949 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:56,655 WARN [Thread-949 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45005,DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b,DISK], DatanodeInfoWithStorage[127.0.0.1:42575,DS-8223df78-bb6e-495e-8fa5-67ed2d3d6386,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45005,DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b,DISK]) is bad. 2024-11-15T11:46:56,655 WARN [Thread-949 {}] hdfs.DataStreamer(1850): Abandoning BP-1235867767-172.17.0.2-1731671193006:blk_1073741865_1048 2024-11-15T11:46:56,655 WARN [Thread-949 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45005,DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b,DISK] 2024-11-15T11:46:56,657 WARN [Thread-949 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:56,657 WARN [Thread-949 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK], DatanodeInfoWithStorage[127.0.0.1:41991,DS-566e9c75-d247-4149-ba27-165b7c1bc5e2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]) is bad. 2024-11-15T11:46:56,657 WARN [Thread-949 {}] hdfs.DataStreamer(1850): Abandoning BP-1235867767-172.17.0.2-1731671193006:blk_1073741866_1049 2024-11-15T11:46:56,657 WARN [Thread-949 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK] 2024-11-15T11:46:56,658 WARN [Thread-949 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:56,658 WARN [Thread-949 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42575,DS-8223df78-bb6e-495e-8fa5-67ed2d3d6386,DISK], DatanodeInfoWithStorage[127.0.0.1:41991,DS-566e9c75-d247-4149-ba27-165b7c1bc5e2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42575,DS-8223df78-bb6e-495e-8fa5-67ed2d3d6386,DISK]) is bad. 
2024-11-15T11:46:56,658 WARN [Thread-949 {}] hdfs.DataStreamer(1850): Abandoning BP-1235867767-172.17.0.2-1731671193006:blk_1073741867_1050 2024-11-15T11:46:56,659 WARN [Thread-949 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42575,DS-8223df78-bb6e-495e-8fa5-67ed2d3d6386,DISK] 2024-11-15T11:46:56,660 WARN [IPC Server handler 1 on default port 40139 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-15T11:46:56,660 WARN [IPC Server handler 1 on default port 40139 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-15T11:46:56,660 WARN [IPC Server handler 1 on default port 40139 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-15T11:46:56,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41991 is added to blk_1073741868_1051 (size=17994) 2024-11-15T11:46:56,691 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T11:46:57,073 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/.tmp/info/8c03a83727a8433f9404ef82013baee0 as hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/8c03a83727a8433f9404ef82013baee0 2024-11-15T11:46:57,083 INFO [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 89ebd913c1105ed0d73496ad67266f67/info of 89ebd913c1105ed0d73496ad67266f67 into 8c03a83727a8433f9404ef82013baee0(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-15T11:46:57,083 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 89ebd913c1105ed0d73496ad67266f67: 2024-11-15T11:46:57,083 INFO [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731671196722.89ebd913c1105ed0d73496ad67266f67., storeName=89ebd913c1105ed0d73496ad67266f67/info, priority=13, startTime=1731671216629; duration=0sec 2024-11-15T11:46:57,083 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-15T11:46:57,083 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T11:46:57,084 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/8c03a83727a8433f9404ef82013baee0 because midkey is the same as first or last row 2024-11-15T11:46:57,084 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-15T11:46:57,084 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T11:46:57,084 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/8c03a83727a8433f9404ef82013baee0 because midkey is the same as first or last row 2024-11-15T11:46:57,084 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-15T11:46:57,084 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T11:46:57,084 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/8c03a83727a8433f9404ef82013baee0 because midkey is the same as first or last row 2024-11-15T11:46:57,084 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T11:46:57,084 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 89ebd913c1105ed0d73496ad67266f67:info 2024-11-15T11:46:57,254 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:57,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46507 {}] regionserver.HRegion(8855): Flush requested on 89ebd913c1105ed0d73496ad67266f67 2024-11-15T11:46:57,610 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 89ebd913c1105ed0d73496ad67266f67 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-15T11:46:57,615 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/.tmp/info/d8e4c64316d646f8b0e93d4536ef0449 is 1079, key is tmprow/info:/1731671217609/Put/seqid=0 2024-11-15T11:46:57,616 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T11:46:57,616 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK], DatanodeInfoWithStorage[127.0.0.1:41991,DS-566e9c75-d247-4149-ba27-165b7c1bc5e2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]) is bad. 2024-11-15T11:46:57,616 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-1235867767-172.17.0.2-1731671193006:blk_1073741869_1052 2024-11-15T11:46:57,617 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK] 2024-11-15T11:46:57,618 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:57,618 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45005,DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b,DISK], DatanodeInfoWithStorage[127.0.0.1:41991,DS-566e9c75-d247-4149-ba27-165b7c1bc5e2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45005,DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b,DISK]) is bad. 2024-11-15T11:46:57,618 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-1235867767-172.17.0.2-1731671193006:blk_1073741870_1053 2024-11-15T11:46:57,619 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45005,DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b,DISK] 2024-11-15T11:46:57,621 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42575 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T11:46:57,620 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:33894 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741871_1054] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data10]'}, localName='127.0.0.1:41991', datanodeUuid='185575bb-4573-42fd-888e-d597e7952c60', xmitsInProgress=0}:Exception transferring block BP-1235867767-172.17.0.2-1731671193006:blk_1073741871_1054 to mirror 127.0.0.1:42575 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:46:57,621 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41991,DS-566e9c75-d247-4149-ba27-165b7c1bc5e2,DISK], DatanodeInfoWithStorage[127.0.0.1:42575,DS-8223df78-bb6e-495e-8fa5-67ed2d3d6386,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42575,DS-8223df78-bb6e-495e-8fa5-67ed2d3d6386,DISK]) is bad. 2024-11-15T11:46:57,621 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-1235867767-172.17.0.2-1731671193006:blk_1073741871_1054 2024-11-15T11:46:57,621 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:33894 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741871_1054] {}] datanode.BlockReceiver(316): Block 1073741871 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-15T11:46:57,621 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:33894 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741871_1054] {}] datanode.DataXceiver(331): 127.0.0.1:41991:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33894 dst: /127.0.0.1:41991 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:46:57,621 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42575,DS-8223df78-bb6e-495e-8fa5-67ed2d3d6386,DISK] 2024-11-15T11:46:57,624 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41789 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:57,624 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:33898 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741872_1055] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data10]'}, localName='127.0.0.1:41991', datanodeUuid='185575bb-4573-42fd-888e-d597e7952c60', xmitsInProgress=0}:Exception transferring block BP-1235867767-172.17.0.2-1731671193006:blk_1073741872_1055 to mirror 127.0.0.1:41789 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T11:46:57,624 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41991,DS-566e9c75-d247-4149-ba27-165b7c1bc5e2,DISK], DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK]) is bad. 2024-11-15T11:46:57,624 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-1235867767-172.17.0.2-1731671193006:blk_1073741872_1055 2024-11-15T11:46:57,624 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:33898 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741872_1055] {}] datanode.BlockReceiver(316): Block 1073741872 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-15T11:46:57,624 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:33898 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741872_1055] {}] datanode.DataXceiver(331): 127.0.0.1:41991:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33898 dst: /127.0.0.1:41991 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T11:46:57,624 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK] 2024-11-15T11:46:57,625 WARN [IPC Server handler 4 on default port 40139 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-15T11:46:57,625 WARN [IPC Server handler 4 on default port 40139 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-15T11:46:57,625 WARN [IPC Server handler 4 on default port 40139 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-15T11:46:57,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41991 is added to blk_1073741873_1056 (size=6027) 2024-11-15T11:46:57,987 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@20630d1e[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:41991, datanodeUuid=185575bb-4573-42fd-888e-d597e7952c60, infoPort=38247, infoSecurePort=0, ipcPort=40257, storageInfo=lv=-57;cid=testClusterID;nsid=696059885;c=1731671193006):Failed to transfer BP-1235867767-172.17.0.2-1731671193006:blk_1073741853_1036 to 127.0.0.1:45005 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T11:46:57,987 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3b437f59[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:41991, datanodeUuid=185575bb-4573-42fd-888e-d597e7952c60, infoPort=38247, infoSecurePort=0, ipcPort=40257, storageInfo=lv=-57;cid=testClusterID;nsid=696059885;c=1731671193006):Failed to transfer BP-1235867767-172.17.0.2-1731671193006:blk_1073741843_1026 to 127.0.0.1:41789 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:46:58,029 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/.tmp/info/d8e4c64316d646f8b0e93d4536ef0449 2024-11-15T11:46:58,036 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/.tmp/info/d8e4c64316d646f8b0e93d4536ef0449 as hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/d8e4c64316d646f8b0e93d4536ef0449 2024-11-15T11:46:58,043 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/d8e4c64316d646f8b0e93d4536ef0449, entries=1, sequenceid=45, filesize=5.9 K 2024-11-15T11:46:58,044 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 89ebd913c1105ed0d73496ad67266f67 in 434ms, sequenceid=45, compaction requested=false 2024-11-15T11:46:58,044 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 89ebd913c1105ed0d73496ad67266f67: 2024-11-15T11:46:58,044 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-15T11:46:58,044 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T11:46:58,044 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/8c03a83727a8433f9404ef82013baee0 because midkey is the same as first or last row 2024-11-15T11:46:58,075 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:58,075 WARN [regionserver/7adf9b3d9d04:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41991,DS-566e9c75-d247-4149-ba27-165b7c1bc5e2,DISK]] 2024-11-15T11:46:58,075 DEBUG [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7adf9b3d9d04%2C46507%2C1731671195095:(num 1731671216048) roll requested 2024-11-15T11:46:58,076 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7adf9b3d9d04%2C46507%2C1731671195095.1731671218075 2024-11-15T11:46:58,080 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:58,080 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45005,DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b,DISK], DatanodeInfoWithStorage[127.0.0.1:42575,DS-8223df78-bb6e-495e-8fa5-67ed2d3d6386,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45005,DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b,DISK]) is bad. 
2024-11-15T11:46:58,081 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-1235867767-172.17.0.2-1731671193006:blk_1073741874_1057 2024-11-15T11:46:58,082 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45005,DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b,DISK] 2024-11-15T11:46:58,084 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:58,084 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK], DatanodeInfoWithStorage[127.0.0.1:41991,DS-566e9c75-d247-4149-ba27-165b7c1bc5e2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]) is bad. 2024-11-15T11:46:58,084 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-1235867767-172.17.0.2-1731671193006:blk_1073741875_1058 2024-11-15T11:46:58,085 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK] 2024-11-15T11:46:58,089 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42575 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T11:46:58,089 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:33916 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741876_1059] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data10]'}, localName='127.0.0.1:41991', datanodeUuid='185575bb-4573-42fd-888e-d597e7952c60', xmitsInProgress=0}:Exception transferring block BP-1235867767-172.17.0.2-1731671193006:blk_1073741876_1059 to mirror 127.0.0.1:42575 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:46:58,090 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41991,DS-566e9c75-d247-4149-ba27-165b7c1bc5e2,DISK], DatanodeInfoWithStorage[127.0.0.1:42575,DS-8223df78-bb6e-495e-8fa5-67ed2d3d6386,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42575,DS-8223df78-bb6e-495e-8fa5-67ed2d3d6386,DISK]) is bad. 2024-11-15T11:46:58,090 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-1235867767-172.17.0.2-1731671193006:blk_1073741876_1059 2024-11-15T11:46:58,090 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:33916 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741876_1059] {}] datanode.BlockReceiver(316): Block 1073741876 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-15T11:46:58,090 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:33916 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741876_1059] {}] datanode.DataXceiver(331): 127.0.0.1:41991:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33916 dst: /127.0.0.1:41991 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:46:58,091 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42575,DS-8223df78-bb6e-495e-8fa5-67ed2d3d6386,DISK] 2024-11-15T11:46:58,092 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:58,093 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK], DatanodeInfoWithStorage[127.0.0.1:41991,DS-566e9c75-d247-4149-ba27-165b7c1bc5e2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK]) is bad. 
2024-11-15T11:46:58,093 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-1235867767-172.17.0.2-1731671193006:blk_1073741877_1060 2024-11-15T11:46:58,093 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK] 2024-11-15T11:46:58,094 WARN [IPC Server handler 4 on default port 40139 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-15T11:46:58,094 WARN [IPC Server handler 4 on default port 40139 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-15T11:46:58,094 WARN [IPC Server handler 4 on default port 40139 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-15T11:46:58,097 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:58,097 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:58,097 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:58,097 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:58,097 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:46:58,097 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.1731671216048 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.1731671218075 2024-11-15T11:46:58,099 DEBUG [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38247:38247)] 2024-11-15T11:46:58,099 DEBUG [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.1731671195781 is not closed yet, will try archiving it next time 2024-11-15T11:46:58,099 DEBUG [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.1731671216048 is not closed yet, will try archiving it next time 2024-11-15T11:46:58,099 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.1731671212023 to hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/oldWALs/7adf9b3d9d04%2C46507%2C1731671195095.1731671212023 2024-11-15T11:46:58,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41991 is added to blk_1073741858_1041 (size=13591) 2024-11-15T11:46:58,502 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.1731671195781 is not closed yet, will try archiving it next time 2024-11-15T11:46:58,692 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:58,985 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@20630d1e[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:41991, datanodeUuid=185575bb-4573-42fd-888e-d597e7952c60, infoPort=38247, infoSecurePort=0, ipcPort=40257, storageInfo=lv=-57;cid=testClusterID;nsid=696059885;c=1731671193006):Failed to transfer BP-1235867767-172.17.0.2-1731671193006:blk_1073741868_1051 to 127.0.0.1:42575 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:46:58,985 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3b437f59[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:41991, datanodeUuid=185575bb-4573-42fd-888e-d597e7952c60, infoPort=38247, infoSecurePort=0, ipcPort=40257, storageInfo=lv=-57;cid=testClusterID;nsid=696059885;c=1731671193006):Failed to transfer BP-1235867767-172.17.0.2-1731671193006:blk_1073741863_1046 to 127.0.0.1:45005 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:46:59,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46507 {}] regionserver.HRegion(8855): Flush requested on 89ebd913c1105ed0d73496ad67266f67 2024-11-15T11:46:59,035 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 89ebd913c1105ed0d73496ad67266f67 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-15T11:46:59,041 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/.tmp/info/886876d8349946db91faec9f0c7fd72b is 1079, key is tmprow/info:/1731671219034/Put/seqid=0 2024-11-15T11:46:59,043 WARN [Thread-966 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:59,043 WARN [Thread-966 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK], DatanodeInfoWithStorage[127.0.0.1:45005,DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]) is bad. 2024-11-15T11:46:59,044 WARN [Thread-966 {}] hdfs.DataStreamer(1850): Abandoning BP-1235867767-172.17.0.2-1731671193006:blk_1073741879_1062 2024-11-15T11:46:59,044 WARN [Thread-966 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK] 2024-11-15T11:46:59,045 WARN [Thread-966 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1063 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:59,045 WARN [Thread-966 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741880_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK], DatanodeInfoWithStorage[127.0.0.1:42575,DS-8223df78-bb6e-495e-8fa5-67ed2d3d6386,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK]) is bad. 2024-11-15T11:46:59,045 WARN [Thread-966 {}] hdfs.DataStreamer(1850): Abandoning BP-1235867767-172.17.0.2-1731671193006:blk_1073741880_1063 2024-11-15T11:46:59,046 WARN [Thread-966 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK] 2024-11-15T11:46:59,047 WARN [Thread-966 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1064 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:59,047 WARN [Thread-966 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741881_1064 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42575,DS-8223df78-bb6e-495e-8fa5-67ed2d3d6386,DISK], DatanodeInfoWithStorage[127.0.0.1:41991,DS-566e9c75-d247-4149-ba27-165b7c1bc5e2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42575,DS-8223df78-bb6e-495e-8fa5-67ed2d3d6386,DISK]) is bad. 
2024-11-15T11:46:59,047 WARN [Thread-966 {}] hdfs.DataStreamer(1850): Abandoning BP-1235867767-172.17.0.2-1731671193006:blk_1073741881_1064 2024-11-15T11:46:59,048 WARN [Thread-966 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42575,DS-8223df78-bb6e-495e-8fa5-67ed2d3d6386,DISK] 2024-11-15T11:46:59,050 WARN [Thread-966 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1065 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45005 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:59,050 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:33928 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741882_1065] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data10]'}, localName='127.0.0.1:41991', datanodeUuid='185575bb-4573-42fd-888e-d597e7952c60', xmitsInProgress=0}:Exception transferring block BP-1235867767-172.17.0.2-1731671193006:blk_1073741882_1065 to mirror 127.0.0.1:45005 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:46:59,050 WARN [Thread-966 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741882_1065 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41991,DS-566e9c75-d247-4149-ba27-165b7c1bc5e2,DISK], DatanodeInfoWithStorage[127.0.0.1:45005,DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45005,DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b,DISK]) is bad. 
2024-11-15T11:46:59,050 WARN [Thread-966 {}] hdfs.DataStreamer(1850): Abandoning BP-1235867767-172.17.0.2-1731671193006:blk_1073741882_1065 2024-11-15T11:46:59,050 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:33928 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741882_1065] {}] datanode.BlockReceiver(316): Block 1073741882 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-15T11:46:59,050 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:33928 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741882_1065] {}] datanode.DataXceiver(331): 127.0.0.1:41991:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33928 dst: /127.0.0.1:41991 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T11:46:59,051 WARN [Thread-966 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45005,DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b,DISK] 2024-11-15T11:46:59,051 WARN [IPC Server handler 4 on default port 40139 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-15T11:46:59,051 WARN [IPC Server handler 4 on default port 40139 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-15T11:46:59,051 WARN [IPC Server handler 4 on default port 40139 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-15T11:46:59,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41991 is added to blk_1073741883_1066 (size=6027) 2024-11-15T11:46:59,254 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T11:46:59,456 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/.tmp/info/886876d8349946db91faec9f0c7fd72b 2024-11-15T11:46:59,462 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/.tmp/info/886876d8349946db91faec9f0c7fd72b as hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/886876d8349946db91faec9f0c7fd72b 2024-11-15T11:46:59,469 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/886876d8349946db91faec9f0c7fd72b, entries=1, sequenceid=55, filesize=5.9 K 2024-11-15T11:46:59,470 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 89ebd913c1105ed0d73496ad67266f67 in 435ms, sequenceid=55, compaction requested=true 2024-11-15T11:46:59,470 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 89ebd913c1105ed0d73496ad67266f67: 2024-11-15T11:46:59,470 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K 2024-11-15T11:46:59,470 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T11:46:59,470 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/8c03a83727a8433f9404ef82013baee0 because midkey is the same as first or last row 2024-11-15T11:46:59,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 89ebd913c1105ed0d73496ad67266f67:info, priority=-2147483648, current under compaction store size is 1 2024-11-15T11:46:59,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T11:46:59,471 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-15T11:46:59,472 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-15T11:46:59,472 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.HStore(1541): 89ebd913c1105ed0d73496ad67266f67/info is initiating minor compaction (all files) 2024-11-15T11:46:59,473 INFO [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 
89ebd913c1105ed0d73496ad67266f67/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731671196722.89ebd913c1105ed0d73496ad67266f67. 2024-11-15T11:46:59,473 INFO [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/8c03a83727a8433f9404ef82013baee0, hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/d8e4c64316d646f8b0e93d4536ef0449, hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/886876d8349946db91faec9f0c7fd72b] into tmpdir=hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/.tmp, totalSize=29.3 K 2024-11-15T11:46:59,473 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8c03a83727a8433f9404ef82013baee0, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731671210072 2024-11-15T11:46:59,474 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] compactions.Compactor(225): Compacting d8e4c64316d646f8b0e93d4536ef0449, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1731671217609 2024-11-15T11:46:59,474 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] compactions.Compactor(225): Compacting 886876d8349946db91faec9f0c7fd72b, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731671219034 2024-11-15T11:46:59,491 INFO [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 89ebd913c1105ed0d73496ad67266f67#info#compaction#24 average throughput is 12.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-15T11:46:59,492 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/.tmp/info/e026615115cd46d599e6f2c411598f7d is 1080, key is row0002/info:/1731671210072/Put/seqid=0 2024-11-15T11:46:59,494 WARN [Thread-971 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1067 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:59,494 WARN [Thread-971 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741884_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK], DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]) is bad. 2024-11-15T11:46:59,494 WARN [Thread-971 {}] hdfs.DataStreamer(1850): Abandoning BP-1235867767-172.17.0.2-1731671193006:blk_1073741884_1067 2024-11-15T11:46:59,494 WARN [Thread-971 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK] 2024-11-15T11:46:59,496 WARN [Thread-971 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1068 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45005 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:59,496 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:33956 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741885_1068] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data10]'}, localName='127.0.0.1:41991', datanodeUuid='185575bb-4573-42fd-888e-d597e7952c60', xmitsInProgress=0}:Exception transferring block BP-1235867767-172.17.0.2-1731671193006:blk_1073741885_1068 to mirror 127.0.0.1:45005 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:46:59,497 WARN [Thread-971 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741885_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41991,DS-566e9c75-d247-4149-ba27-165b7c1bc5e2,DISK], DatanodeInfoWithStorage[127.0.0.1:45005,DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45005,DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b,DISK]) is bad. 2024-11-15T11:46:59,497 WARN [Thread-971 {}] hdfs.DataStreamer(1850): Abandoning BP-1235867767-172.17.0.2-1731671193006:blk_1073741885_1068 2024-11-15T11:46:59,497 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:33956 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741885_1068] {}] datanode.BlockReceiver(316): Block 1073741885 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-15T11:46:59,497 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:33956 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741885_1068] {}] datanode.DataXceiver(331): 127.0.0.1:41991:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33956 dst: /127.0.0.1:41991 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:46:59,497 WARN [Thread-971 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45005,DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b,DISK] 2024-11-15T11:46:59,499 WARN [Thread-971 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741886_1069 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42575 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T11:46:59,499 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:33964 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741886_1069] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data10]'}, localName='127.0.0.1:41991', datanodeUuid='185575bb-4573-42fd-888e-d597e7952c60', xmitsInProgress=0}:Exception transferring block BP-1235867767-172.17.0.2-1731671193006:blk_1073741886_1069 to mirror 127.0.0.1:42575 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:46:59,500 WARN [Thread-971 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741886_1069 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41991,DS-566e9c75-d247-4149-ba27-165b7c1bc5e2,DISK], DatanodeInfoWithStorage[127.0.0.1:42575,DS-8223df78-bb6e-495e-8fa5-67ed2d3d6386,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42575,DS-8223df78-bb6e-495e-8fa5-67ed2d3d6386,DISK]) is bad. 2024-11-15T11:46:59,500 WARN [Thread-971 {}] hdfs.DataStreamer(1850): Abandoning BP-1235867767-172.17.0.2-1731671193006:blk_1073741886_1069 2024-11-15T11:46:59,500 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:33964 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741886_1069] {}] datanode.BlockReceiver(316): Block 1073741886 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-15T11:46:59,500 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:33964 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741886_1069] {}] datanode.DataXceiver(331): 127.0.0.1:41991:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33964 dst: /127.0.0.1:41991 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:46:59,500 WARN [Thread-971 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42575,DS-8223df78-bb6e-495e-8fa5-67ed2d3d6386,DISK] 2024-11-15T11:46:59,502 WARN [Thread-971 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1070 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:46:59,502 WARN [Thread-971 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741887_1070 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK], DatanodeInfoWithStorage[127.0.0.1:41991,DS-566e9c75-d247-4149-ba27-165b7c1bc5e2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK]) is bad. 
2024-11-15T11:46:59,502 WARN [Thread-971 {}] hdfs.DataStreamer(1850): Abandoning BP-1235867767-172.17.0.2-1731671193006:blk_1073741887_1070 2024-11-15T11:46:59,502 WARN [Thread-971 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK] 2024-11-15T11:46:59,503 WARN [IPC Server handler 4 on default port 40139 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-15T11:46:59,503 WARN [IPC Server handler 4 on default port 40139 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-15T11:46:59,503 WARN [IPC Server handler 4 on default port 40139 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-15T11:46:59,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41991 is added to blk_1073741888_1071 (size=18097) 2024-11-15T11:46:59,921 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/.tmp/info/e026615115cd46d599e6f2c411598f7d as hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/e026615115cd46d599e6f2c411598f7d 2024-11-15T11:46:59,931 INFO [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 89ebd913c1105ed0d73496ad67266f67/info of 89ebd913c1105ed0d73496ad67266f67 into e026615115cd46d599e6f2c411598f7d(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-15T11:46:59,931 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 89ebd913c1105ed0d73496ad67266f67: 2024-11-15T11:46:59,931 INFO [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731671196722.89ebd913c1105ed0d73496ad67266f67., storeName=89ebd913c1105ed0d73496ad67266f67/info, priority=13, startTime=1731671219471; duration=0sec 2024-11-15T11:46:59,931 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-15T11:46:59,931 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T11:46:59,931 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/e026615115cd46d599e6f2c411598f7d because midkey is the same as first or last row 2024-11-15T11:46:59,932 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-15T11:46:59,932 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T11:46:59,932 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/e026615115cd46d599e6f2c411598f7d because midkey is the same as first or last row 2024-11-15T11:46:59,932 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-15T11:46:59,932 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T11:46:59,932 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/e026615115cd46d599e6f2c411598f7d because midkey is the same as first or last row 2024-11-15T11:46:59,932 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T11:46:59,932 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 89ebd913c1105ed0d73496ad67266f67:info 2024-11-15T11:47:00,099 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:47:00,099 WARN [regionserver/7adf9b3d9d04:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-11-15T11:47:00,267 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T11:47:00,270 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T11:47:00,271 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T11:47:00,271 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T11:47:00,271 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T11:47:00,271 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@22c6c03b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/hadoop.log.dir/,AVAILABLE} 2024-11-15T11:47:00,272 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@54f27916{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T11:47:00,368 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@575d7e2f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/java.io.tmpdir/jetty-localhost-38225-hadoop-hdfs-3_4_1-tests_jar-_-any-6824555171908690422/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T11:47:00,368 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@39eaf0e6{HTTP/1.1, (http/1.1)}{localhost:38225} 2024-11-15T11:47:00,368 INFO [Time-limited test {}] server.Server(415): Started @136122ms 2024-11-15T11:47:00,370 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T11:47:00,692 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:47:00,782 WARN [Thread-991 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-15T11:47:00,790 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd78f235f31f4fd3 with lease ID 0x5b612b60ecdb6320: from storage DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b node DatanodeRegistration(127.0.0.1:45125, datanodeUuid=053e9b0a-0ad1-4ae0-be32-c63262dd71be, infoPort=45045, infoSecurePort=0, ipcPort=40387, storageInfo=lv=-57;cid=testClusterID;nsid=696059885;c=1731671193006), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-15T11:47:00,791 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd78f235f31f4fd3 with lease ID 0x5b612b60ecdb6320: from storage DS-f2f46fdc-cde6-48ff-ab65-bed20d14a134 node DatanodeRegistration(127.0.0.1:45125, datanodeUuid=053e9b0a-0ad1-4ae0-be32-c63262dd71be, infoPort=45045, infoSecurePort=0, ipcPort=40387, storageInfo=lv=-57;cid=testClusterID;nsid=696059885;c=1731671193006), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T11:47:00,986 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@20630d1e[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:41991, datanodeUuid=185575bb-4573-42fd-888e-d597e7952c60, infoPort=38247, infoSecurePort=0, ipcPort=40257, storageInfo=lv=-57;cid=testClusterID;nsid=696059885;c=1731671193006):Failed to transfer BP-1235867767-172.17.0.2-1731671193006:blk_1073741858_1041 to 127.0.0.1:42575 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T11:47:00,986 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3b437f59[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:41991, datanodeUuid=185575bb-4573-42fd-888e-d597e7952c60, infoPort=38247, infoSecurePort=0, ipcPort=40257, storageInfo=lv=-57;cid=testClusterID;nsid=696059885;c=1731671193006):Failed to transfer BP-1235867767-172.17.0.2-1731671193006:blk_1073741873_1056 to 127.0.0.1:43663 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:47:01,255 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:47:01,989 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3b437f59[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:41991, datanodeUuid=185575bb-4573-42fd-888e-d597e7952c60, infoPort=38247, infoSecurePort=0, ipcPort=40257, storageInfo=lv=-57;cid=testClusterID;nsid=696059885;c=1731671193006):Failed to transfer BP-1235867767-172.17.0.2-1731671193006:blk_1073741888_1071 to 127.0.0.1:43663 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T11:47:01,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45125 is added to blk_1073741883_1066 (size=6027) 2024-11-15T11:47:02,100 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:47:02,692 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:47:03,255 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:47:04,100 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T11:47:04,693 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:47:04,847 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-15T11:47:05,256 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:47:05,483 ERROR [FSHLog-0-hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/MasterData-prefix:7adf9b3d9d04,34851,1731671194886 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:47:05,483 WARN [FSHLog-0-hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/MasterData-prefix:7adf9b3d9d04,34851,1731671194886 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:47:05,484 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 7adf9b3d9d04%2C34851%2C1731671194886:(num 1731671195275) roll requested 2024-11-15T11:47:05,484 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7adf9b3d9d04%2C34851%2C1731671194886.1731671225484 2024-11-15T11:47:05,487 WARN [Thread-1012 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741889_1072 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:47:05,487 WARN [Thread-1012 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741889_1072 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK], DatanodeInfoWithStorage[127.0.0.1:45125,DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK]) is bad. 2024-11-15T11:47:05,487 WARN [Thread-1012 {}] hdfs.DataStreamer(1850): Abandoning BP-1235867767-172.17.0.2-1731671193006:blk_1073741889_1072 2024-11-15T11:47:05,488 WARN [Thread-1012 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK] 2024-11-15T11:47:05,495 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:05,495 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:05,495 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:05,495 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:05,495 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:05,496 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/MasterData/WALs/7adf9b3d9d04,34851,1731671194886/7adf9b3d9d04%2C34851%2C1731671194886.1731671195275 with entries=54, filesize=26.67 KB; new WAL /user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/MasterData/WALs/7adf9b3d9d04,34851,1731671194886/7adf9b3d9d04%2C34851%2C1731671194886.1731671225484 2024-11-15T11:47:05,496 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:47:05,496 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:47:05,497 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/MasterData/WALs/7adf9b3d9d04,34851,1731671194886/7adf9b3d9d04%2C34851%2C1731671194886.1731671195275 2024-11-15T11:47:05,497 WARN [IPC Server handler 2 on default port 40139 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/MasterData/WALs/7adf9b3d9d04,34851,1731671194886/7adf9b3d9d04%2C34851%2C1731671194886.1731671195275 has not been closed. Lease recovery is in progress. RecoveryId = 1074 for block blk_1073741830_1006 2024-11-15T11:47:05,497 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/MasterData/WALs/7adf9b3d9d04,34851,1731671194886/7adf9b3d9d04%2C34851%2C1731671194886.1731671195275 after 0ms 2024-11-15T11:47:05,501 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45045:45045),(127.0.0.1/127.0.0.1:38247:38247)] 2024-11-15T11:47:05,501 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/MasterData/WALs/7adf9b3d9d04,34851,1731671194886/7adf9b3d9d04%2C34851%2C1731671194886.1731671195275 is not closed yet, will try archiving it next time 2024-11-15T11:47:06,101 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:47:06,694 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:47:08,101 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:47:08,694 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:47:09,499 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/MasterData/WALs/7adf9b3d9d04,34851,1731671194886/7adf9b3d9d04%2C34851%2C1731671194886.1731671195275 after 4002ms 2024-11-15T11:47:10,101 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:47:10,694 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:47:10,805 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@24933e1b {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1235867767-172.17.0.2-1731671193006:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:43663,null,null]) java.net.ConnectException: Call From 7adf9b3d9d04/172.17.0.2 to localhost:44705 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-15T11:47:10,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45125 is added to blk_1073741833_1020 (size=455) 2024-11-15T11:47:11,047 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.1731671195781 to hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/oldWALs/7adf9b3d9d04%2C46507%2C1731671195095.1731671195781 2024-11-15T11:47:11,049 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.1731671216048 to hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/oldWALs/7adf9b3d9d04%2C46507%2C1731671195095.1731671216048 2024-11-15T11:47:12,102 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T11:47:12,695 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:47:12,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41991 is added to blk_1073741833_1020 (size=455) 2024-11-15T11:47:13,764 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7adf9b3d9d04%2C46507%2C1731671195095.1731671233764 2024-11-15T11:47:13,772 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2073313263_22 at /127.0.0.1:52802 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741891_1075] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data4]'}, localName='127.0.0.1:45125', datanodeUuid='053e9b0a-0ad1-4ae0-be32-c63262dd71be', xmitsInProgress=0}:Exception transferring block BP-1235867767-172.17.0.2-1731671193006:blk_1073741891_1075 to mirror 127.0.0.1:41789 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:47:13,772 WARN [Thread-1023 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741891_1075 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41789 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:47:13,772 WARN [Thread-1023 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741891_1075 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45125,DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b,DISK], DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK]) is bad. 2024-11-15T11:47:13,773 WARN [Thread-1023 {}] hdfs.DataStreamer(1850): Abandoning BP-1235867767-172.17.0.2-1731671193006:blk_1073741891_1075 2024-11-15T11:47:13,773 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-2073313263_22 at /127.0.0.1:52802 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741891_1075] {}] datanode.BlockReceiver(316): Block 1073741891 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-15T11:47:13,773 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2073313263_22 at /127.0.0.1:52802 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741891_1075] {}] datanode.DataXceiver(331): 127.0.0.1:45125:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52802 dst: /127.0.0.1:45125 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T11:47:13,774 WARN [Thread-1023 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK] 2024-11-15T11:47:13,779 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:13,779 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:13,780 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:13,780 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:13,780 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:13,780 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.1731671218075 with entries=13, filesize=12.60 KB; new WAL /user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.1731671233764 2024-11-15T11:47:13,781 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45045:45045),(127.0.0.1/127.0.0.1:38247:38247)] 2024-11-15T11:47:13,781 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.1731671218075 is not closed yet, will try archiving it next time 2024-11-15T11:47:13,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41991 is added to blk_1073741878_1061 (size=12911) 2024-11-15T11:47:13,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46507 {}] regionserver.HRegion(8855): Flush requested on 89ebd913c1105ed0d73496ad67266f67 2024-11-15T11:47:13,786 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 89ebd913c1105ed0d73496ad67266f67 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-15T11:47:13,793 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/.tmp/info/3cc076248eb94bddb46e37f2774f2570 is 1080, key is row0013/info:/1731671233783/Put/seqid=0 2024-11-15T11:47:13,795 WARN [Thread-1030 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741893_1077 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T11:47:13,795 WARN [Thread-1030 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741893_1077 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK], DatanodeInfoWithStorage[127.0.0.1:45125,DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK]) is bad. 2024-11-15T11:47:13,795 WARN [Thread-1030 {}] hdfs.DataStreamer(1850): Abandoning BP-1235867767-172.17.0.2-1731671193006:blk_1073741893_1077 2024-11-15T11:47:13,796 WARN [Thread-1030 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK] 2024-11-15T11:47:13,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45125 is added to blk_1073741894_1078 (size=8190) 2024-11-15T11:47:13,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41991 is added to blk_1073741894_1078 (size=8190) 2024-11-15T11:47:13,803 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/.tmp/info/3cc076248eb94bddb46e37f2774f2570 2024-11-15T11:47:13,811 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/.tmp/info/3cc076248eb94bddb46e37f2774f2570 as hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/3cc076248eb94bddb46e37f2774f2570 2024-11-15T11:47:13,818 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/3cc076248eb94bddb46e37f2774f2570, entries=3, sequenceid=66, filesize=8.0 K 2024-11-15T11:47:13,819 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7527, heapSize ~8.11 KB/8304, currentSize=9.46 KB/9683 for 89ebd913c1105ed0d73496ad67266f67 in 33ms, sequenceid=66, compaction requested=false 2024-11-15T11:47:13,819 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 89ebd913c1105ed0d73496ad67266f67: 2024-11-15T11:47:13,819 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=25.7 K, sizeToCheck=16.0 K 2024-11-15T11:47:13,819 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T11:47:13,819 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/e026615115cd46d599e6f2c411598f7d because midkey is the same as first or last row 2024-11-15T11:47:14,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46507 {}] 
regionserver.HRegion(8855): Flush requested on 89ebd913c1105ed0d73496ad67266f67 2024-11-15T11:47:14,019 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 89ebd913c1105ed0d73496ad67266f67 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-15T11:47:14,029 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/.tmp/info/b3f54fb8559040c68634186e860deca7 is 1080, key is row0015/info:/1731671233788/Put/seqid=0 2024-11-15T11:47:14,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41991 is added to blk_1073741895_1079 (size=14660) 2024-11-15T11:47:14,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45125 is added to blk_1073741895_1079 (size=14660) 2024-11-15T11:47:14,037 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/.tmp/info/b3f54fb8559040c68634186e860deca7 2024-11-15T11:47:14,044 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/.tmp/info/b3f54fb8559040c68634186e860deca7 as hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/b3f54fb8559040c68634186e860deca7 2024-11-15T11:47:14,050 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/b3f54fb8559040c68634186e860deca7, entries=9, sequenceid=79, filesize=14.3 K 2024-11-15T11:47:14,051 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10758, heapSize ~11.48 KB/11760, currentSize=0 B/0 for 89ebd913c1105ed0d73496ad67266f67 in 33ms, sequenceid=79, compaction requested=true 2024-11-15T11:47:14,051 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 89ebd913c1105ed0d73496ad67266f67: 2024-11-15T11:47:14,052 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=40.0 K, sizeToCheck=16.0 K 2024-11-15T11:47:14,052 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T11:47:14,052 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/e026615115cd46d599e6f2c411598f7d because midkey is the same as first or last row 2024-11-15T11:47:14,052 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 89ebd913c1105ed0d73496ad67266f67:info, priority=-2147483648, current under compaction store size is 1 
2024-11-15T11:47:14,052 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T11:47:14,052 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-15T11:47:14,053 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40947 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-15T11:47:14,053 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.HStore(1541): 89ebd913c1105ed0d73496ad67266f67/info is initiating minor compaction (all files) 2024-11-15T11:47:14,053 INFO [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 89ebd913c1105ed0d73496ad67266f67/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731671196722.89ebd913c1105ed0d73496ad67266f67. 2024-11-15T11:47:14,053 INFO [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/e026615115cd46d599e6f2c411598f7d, hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/3cc076248eb94bddb46e37f2774f2570, hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/b3f54fb8559040c68634186e860deca7] into tmpdir=hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/.tmp, totalSize=40.0 K 2024-11-15T11:47:14,054 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] compactions.Compactor(225): Compacting e026615115cd46d599e6f2c411598f7d, keycount=12, bloomtype=ROW, size=17.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731671210072 2024-11-15T11:47:14,054 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3cc076248eb94bddb46e37f2774f2570, keycount=3, bloomtype=ROW, size=8.0 K, encoding=NONE, compression=NONE, seqNum=66, earliestPutTs=1731671220055 2024-11-15T11:47:14,055 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] compactions.Compactor(225): Compacting b3f54fb8559040c68634186e860deca7, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1731671233788 2024-11-15T11:47:14,065 INFO [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 89ebd913c1105ed0d73496ad67266f67#info#compaction#27 average throughput is 22.57 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-15T11:47:14,066 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/.tmp/info/53f5c67713b94511adea9c0fcdc44811 is 1080, key is row0002/info:/1731671210072/Put/seqid=0 2024-11-15T11:47:14,068 WARN [Thread-1045 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741896_1080 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41789 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:47:14,068 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:50584 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741896_1080] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data10]'}, localName='127.0.0.1:41991', datanodeUuid='185575bb-4573-42fd-888e-d597e7952c60', xmitsInProgress=0}:Exception transferring block BP-1235867767-172.17.0.2-1731671193006:blk_1073741896_1080 to mirror 127.0.0.1:41789 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T11:47:14,069 WARN [Thread-1045 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741896_1080 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41991,DS-566e9c75-d247-4149-ba27-165b7c1bc5e2,DISK], DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK]) is bad. 2024-11-15T11:47:14,069 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:50584 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741896_1080] {}] datanode.BlockReceiver(316): Block 1073741896 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-15T11:47:14,069 WARN [Thread-1045 {}] hdfs.DataStreamer(1850): Abandoning BP-1235867767-172.17.0.2-1731671193006:blk_1073741896_1080 2024-11-15T11:47:14,069 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_631528300_22 at /127.0.0.1:50584 [Receiving block BP-1235867767-172.17.0.2-1731671193006:blk_1073741896_1080] {}] datanode.DataXceiver(331): 127.0.0.1:41991:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50584 dst: /127.0.0.1:41991 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T11:47:14,069 WARN [Thread-1045 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK] 2024-11-15T11:47:14,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45125 is added to blk_1073741897_1081 (size=28989) 2024-11-15T11:47:14,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41991 is added to blk_1073741897_1081 (size=28989) 2024-11-15T11:47:14,081 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/.tmp/info/53f5c67713b94511adea9c0fcdc44811 as hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/53f5c67713b94511adea9c0fcdc44811 2024-11-15T11:47:14,088 INFO [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 89ebd913c1105ed0d73496ad67266f67/info of 89ebd913c1105ed0d73496ad67266f67 into 53f5c67713b94511adea9c0fcdc44811(size=28.3 K), total size for store is 28.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-15T11:47:14,088 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 89ebd913c1105ed0d73496ad67266f67: 2024-11-15T11:47:14,088 INFO [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731671196722.89ebd913c1105ed0d73496ad67266f67., storeName=89ebd913c1105ed0d73496ad67266f67/info, priority=13, startTime=1731671234052; duration=0sec 2024-11-15T11:47:14,089 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.3 K, sizeToCheck=16.0 K 2024-11-15T11:47:14,089 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T11:47:14,089 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/53f5c67713b94511adea9c0fcdc44811 because midkey is the same as first or last row 2024-11-15T11:47:14,089 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.3 K, sizeToCheck=16.0 K 2024-11-15T11:47:14,089 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T11:47:14,089 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/53f5c67713b94511adea9c0fcdc44811 because midkey is the same as first or last row 2024-11-15T11:47:14,089 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] 
regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.3 K, sizeToCheck=16.0 K 2024-11-15T11:47:14,089 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T11:47:14,089 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/53f5c67713b94511adea9c0fcdc44811 because midkey is the same as first or last row 2024-11-15T11:47:14,089 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T11:47:14,089 DEBUG [RS:0;7adf9b3d9d04:46507-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 89ebd913c1105ed0d73496ad67266f67:info 2024-11-15T11:47:14,102 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:47:14,102 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-11-15T11:47:14,184 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.1731671218075 to hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/oldWALs/7adf9b3d9d04%2C46507%2C1731671195095.1731671218075 2024-11-15T11:47:14,220 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-15T11:47:14,220 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-15T11:47:14,221 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T11:47:14,221 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:47:14,222 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:47:14,222 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-15T11:47:14,222 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-15T11:47:14,222 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=806474155, stopped=false 2024-11-15T11:47:14,223 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=7adf9b3d9d04,34851,1731671194886 2024-11-15T11:47:14,283 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34851-0x1013f9b0fa70000, quorum=127.0.0.1:61276, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T11:47:14,283 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46507-0x1013f9b0fa70001, quorum=127.0.0.1:61276, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T11:47:14,284 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34851-0x1013f9b0fa70000, quorum=127.0.0.1:61276, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:47:14,284 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46507-0x1013f9b0fa70001, quorum=127.0.0.1:61276, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:47:14,284 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T11:47:14,284 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39827-0x1013f9b0fa70002, quorum=127.0.0.1:61276, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T11:47:14,284 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39827-0x1013f9b0fa70002, quorum=127.0.0.1:61276, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:47:14,284 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-15T11:47:14,285 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T11:47:14,286 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:47:14,286 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:46507-0x1013f9b0fa70001, quorum=127.0.0.1:61276, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T11:47:14,287 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39827-0x1013f9b0fa70002, quorum=127.0.0.1:61276, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T11:47:14,287 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '7adf9b3d9d04,46507,1731671195095' ***** 2024-11-15T11:47:14,287 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-15T11:47:14,287 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '7adf9b3d9d04,39827,1731671196494' ***** 2024-11-15T11:47:14,287 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-15T11:47:14,287 INFO [RS:0;7adf9b3d9d04:46507 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-15T11:47:14,288 INFO [RS:0;7adf9b3d9d04:46507 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-15T11:47:14,288 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-15T11:47:14,288 INFO [RS:1;7adf9b3d9d04:39827 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-15T11:47:14,288 INFO [RS:0;7adf9b3d9d04:46507 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-15T11:47:14,288 INFO [RS:1;7adf9b3d9d04:39827 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-15T11:47:14,288 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-15T11:47:14,288 INFO [RS:0;7adf9b3d9d04:46507 {}] regionserver.HRegionServer(3091): Received CLOSE for 89ebd913c1105ed0d73496ad67266f67 2024-11-15T11:47:14,288 INFO [RS:1;7adf9b3d9d04:39827 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-15T11:47:14,288 INFO [RS:1;7adf9b3d9d04:39827 {}] regionserver.HRegionServer(959): stopping server 7adf9b3d9d04,39827,1731671196494 2024-11-15T11:47:14,288 INFO [RS:1;7adf9b3d9d04:39827 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T11:47:14,288 INFO [RS:1;7adf9b3d9d04:39827 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;7adf9b3d9d04:39827. 
2024-11-15T11:47:14,288 INFO [RS:0;7adf9b3d9d04:46507 {}] regionserver.HRegionServer(959): stopping server 7adf9b3d9d04,46507,1731671195095 2024-11-15T11:47:14,288 INFO [RS:0;7adf9b3d9d04:46507 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T11:47:14,288 DEBUG [RS:1;7adf9b3d9d04:39827 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T11:47:14,288 DEBUG [RS:1;7adf9b3d9d04:39827 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:47:14,288 INFO [RS:0;7adf9b3d9d04:46507 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;7adf9b3d9d04:46507. 2024-11-15T11:47:14,289 DEBUG [RS:0;7adf9b3d9d04:46507 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T11:47:14,289 INFO [RS:1;7adf9b3d9d04:39827 {}] regionserver.HRegionServer(976): stopping server 7adf9b3d9d04,39827,1731671196494; all regions closed. 
2024-11-15T11:47:14,289 DEBUG [RS:0;7adf9b3d9d04:46507 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:47:14,289 INFO [RS:0;7adf9b3d9d04:46507 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-15T11:47:14,289 INFO [RS:0;7adf9b3d9d04:46507 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-15T11:47:14,289 INFO [RS:0;7adf9b3d9d04:46507 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-15T11:47:14,289 INFO [RS:0;7adf9b3d9d04:46507 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-15T11:47:14,289 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 89ebd913c1105ed0d73496ad67266f67, disabling compactions & flushes 2024-11-15T11:47:14,289 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34851-0x1013f9b0fa70000, quorum=127.0.0.1:61276, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T11:47:14,289 INFO [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731671196722.89ebd913c1105ed0d73496ad67266f67. 2024-11-15T11:47:14,289 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731671196722.89ebd913c1105ed0d73496ad67266f67. 2024-11-15T11:47:14,290 INFO [RS:0;7adf9b3d9d04:46507 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-15T11:47:14,290 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:14,290 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731671196722.89ebd913c1105ed0d73496ad67266f67. after waiting 0 ms 2024-11-15T11:47:14,290 DEBUG [RS:0;7adf9b3d9d04:46507 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 89ebd913c1105ed0d73496ad67266f67=TestLogRolling-testLogRollOnDatanodeDeath,,1731671196722.89ebd913c1105ed0d73496ad67266f67.} 2024-11-15T11:47:14,290 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731671196722.89ebd913c1105ed0d73496ad67266f67. 
2024-11-15T11:47:14,290 DEBUG [RS:0;7adf9b3d9d04:46507 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 89ebd913c1105ed0d73496ad67266f67 2024-11-15T11:47:14,290 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T11:47:14,290 INFO [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T11:47:14,290 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T11:47:14,290 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T11:47:14,290 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T11:47:14,290 INFO [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-15T11:47:14,290 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731671196722.89ebd913c1105ed0d73496ad67266f67.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/fadb30125a4b425b89f23c32ef91fc18, hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/ba263eb48e5e4f4ebe122cb875f3fdd8, hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/8c03a83727a8433f9404ef82013baee0, hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/fb8139e34d984e88978400d19e28e64a, hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/d8e4c64316d646f8b0e93d4536ef0449, hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/e026615115cd46d599e6f2c411598f7d, hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/886876d8349946db91faec9f0c7fd72b, hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/3cc076248eb94bddb46e37f2774f2570, hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/b3f54fb8559040c68634186e860deca7] to archive 2024-11-15T11:47:14,291 ERROR [FSHLog-0-hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2-prefix:7adf9b3d9d04,46507,1731671195095.meta {}] 
wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:47:14,291 WARN [FSHLog-0-hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2-prefix:7adf9b3d9d04,46507,1731671195095.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:47:14,291 DEBUG [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7adf9b3d9d04%2C46507%2C1731671195095.meta:.meta(num 1731671196199) roll requested 2024-11-15T11:47:14,292 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:14,292 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671234291.meta 2024-11-15T11:47:14,292 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:14,292 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:14,292 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:14,292 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731671196722.89ebd913c1105ed0d73496ad67266f67.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-15T11:47:14,293 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T11:47:14,293 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:47:14,293 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 2024-11-15T11:47:14,293 WARN [IPC Server handler 4 on default port 40139 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 has not been closed. Lease recovery is in progress. RecoveryId = 1082 for block blk_1073741837_1013 2024-11-15T11:47:14,294 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 after 1ms 2024-11-15T11:47:14,294 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731671196722.89ebd913c1105ed0d73496ad67266f67.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/fadb30125a4b425b89f23c32ef91fc18 to hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/fadb30125a4b425b89f23c32ef91fc18 2024-11-15T11:47:14,296 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731671196722.89ebd913c1105ed0d73496ad67266f67.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/ba263eb48e5e4f4ebe122cb875f3fdd8 to hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/ba263eb48e5e4f4ebe122cb875f3fdd8 2024-11-15T11:47:14,297 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731671196722.89ebd913c1105ed0d73496ad67266f67.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/8c03a83727a8433f9404ef82013baee0 to 
hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/8c03a83727a8433f9404ef82013baee0 2024-11-15T11:47:14,298 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731671196722.89ebd913c1105ed0d73496ad67266f67.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/fb8139e34d984e88978400d19e28e64a to hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/fb8139e34d984e88978400d19e28e64a 2024-11-15T11:47:14,300 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731671196722.89ebd913c1105ed0d73496ad67266f67.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/d8e4c64316d646f8b0e93d4536ef0449 to hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/d8e4c64316d646f8b0e93d4536ef0449 2024-11-15T11:47:14,301 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731671196722.89ebd913c1105ed0d73496ad67266f67.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/e026615115cd46d599e6f2c411598f7d to hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/e026615115cd46d599e6f2c411598f7d 2024-11-15T11:47:14,302 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731671196722.89ebd913c1105ed0d73496ad67266f67.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/886876d8349946db91faec9f0c7fd72b to hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/886876d8349946db91faec9f0c7fd72b 2024-11-15T11:47:14,303 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731671196722.89ebd913c1105ed0d73496ad67266f67.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/3cc076248eb94bddb46e37f2774f2570 to hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/3cc076248eb94bddb46e37f2774f2570 2024-11-15T11:47:14,304 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731671196722.89ebd913c1105ed0d73496ad67266f67.-1 {}] backup.HFileArchiver(596): Archived from 
FileableStoreFile, hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/b3f54fb8559040c68634186e860deca7 to hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/info/b3f54fb8559040c68634186e860deca7 2024-11-15T11:47:14,305 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731671196722.89ebd913c1105ed0d73496ad67266f67.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=7adf9b3d9d04:34851 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-11-15T11:47:14,305 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731671196722.89ebd913c1105ed0d73496ad67266f67.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [fadb30125a4b425b89f23c32ef91fc18=10347, ba263eb48e5e4f4ebe122cb875f3fdd8=12506, 8c03a83727a8433f9404ef82013baee0=17994, fb8139e34d984e88978400d19e28e64a=6027, d8e4c64316d646f8b0e93d4536ef0449=6027, e026615115cd46d599e6f2c411598f7d=18097, 886876d8349946db91faec9f0c7fd72b=6027, 3cc076248eb94bddb46e37f2774f2570=8190, b3f54fb8559040c68634186e860deca7=14660] 2024-11-15T11:47:14,311 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:14,311 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:14,311 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:14,311 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:14,311 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:14,311 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671234291.meta 2024-11-15T11:47:14,316 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:47:14,316 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43663,DS-73a9b79b-0597-4c5e-9e1d-964c7338721e,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:47:14,316 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta 2024-11-15T11:47:14,317 WARN [IPC Server handler 1 on default port 40139 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta has not been closed. Lease recovery is in progress. RecoveryId = 1084 for block blk_1073741834_1010 2024-11-15T11:47:14,317 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/default/TestLogRolling-testLogRollOnDatanodeDeath/89ebd913c1105ed0d73496ad67266f67/recovered.edits/83.seqid, newMaxSeqId=83, maxSeqId=1 2024-11-15T11:47:14,317 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta after 1ms 2024-11-15T11:47:14,317 INFO [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731671196722.89ebd913c1105ed0d73496ad67266f67. 2024-11-15T11:47:14,317 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 89ebd913c1105ed0d73496ad67266f67: Waiting for close lock at 1731671234289Running coprocessor pre-close hooks at 1731671234289Disabling compacts and flushes for region at 1731671234289Disabling writes for close at 1731671234290 (+1 ms)Writing region close event to WAL at 1731671234310 (+20 ms)Running coprocessor post-close hooks at 1731671234317 (+7 ms)Closed at 1731671234317 2024-11-15T11:47:14,318 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731671196722.89ebd913c1105ed0d73496ad67266f67. 
2024-11-15T11:47:14,319 DEBUG [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45045:45045),(127.0.0.1/127.0.0.1:38247:38247)] 2024-11-15T11:47:14,319 DEBUG [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta is not closed yet, will try archiving it next time 2024-11-15T11:47:14,344 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/hbase/meta/1588230740/.tmp/info/8b2d6062ec214d61a88b49d5c0dc5072 is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1731671196722.89ebd913c1105ed0d73496ad67266f67./info:regioninfo/1731671197496/Put/seqid=0 2024-11-15T11:47:14,346 WARN [Thread-1059 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741899_1085 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:47:14,346 WARN [Thread-1059 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741899_1085 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK], DatanodeInfoWithStorage[127.0.0.1:45125,DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK]) is bad. 
2024-11-15T11:47:14,346 WARN [Thread-1059 {}] hdfs.DataStreamer(1850): Abandoning BP-1235867767-172.17.0.2-1731671193006:blk_1073741899_1085 2024-11-15T11:47:14,346 WARN [Thread-1059 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK] 2024-11-15T11:47:14,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45125 is added to blk_1073741900_1086 (size=7089) 2024-11-15T11:47:14,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41991 is added to blk_1073741900_1086 (size=7089) 2024-11-15T11:47:14,490 DEBUG [RS:0;7adf9b3d9d04:46507 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-15T11:47:14,690 DEBUG [RS:0;7adf9b3d9d04:46507 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-15T11:47:14,692 INFO [regionserver/7adf9b3d9d04:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T11:47:14,709 INFO [regionserver/7adf9b3d9d04:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-15T11:47:14,710 INFO [regionserver/7adf9b3d9d04:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-15T11:47:14,750 INFO [regionserver/7adf9b3d9d04:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-15T11:47:14,750 INFO [regionserver/7adf9b3d9d04:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-15T11:47:14,753 INFO [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/hbase/meta/1588230740/.tmp/info/8b2d6062ec214d61a88b49d5c0dc5072 2024-11-15T11:47:14,783 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/hbase/meta/1588230740/.tmp/ns/3b9aa7cb81704780b2fb5e47c3de61fa is 43, key is default/ns:d/1731671196356/Put/seqid=0 2024-11-15T11:47:14,785 WARN [Thread-1065 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741901_1087 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T11:47:14,785 WARN [Thread-1065 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741901_1087 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK], DatanodeInfoWithStorage[127.0.0.1:45125,DS-9310d3a5-ea0c-4044-992b-ea5c3406c08b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK]) is bad. 2024-11-15T11:47:14,785 WARN [Thread-1065 {}] hdfs.DataStreamer(1850): Abandoning BP-1235867767-172.17.0.2-1731671193006:blk_1073741901_1087 2024-11-15T11:47:14,786 WARN [Thread-1065 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK] 2024-11-15T11:47:14,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45125 is added to blk_1073741902_1088 (size=5153) 2024-11-15T11:47:14,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41991 is added to blk_1073741902_1088 (size=5153) 2024-11-15T11:47:14,790 INFO [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/hbase/meta/1588230740/.tmp/ns/3b9aa7cb81704780b2fb5e47c3de61fa 2024-11-15T11:47:14,812 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/hbase/meta/1588230740/.tmp/table/ff4c91a02d3c491a86d15f7fab88808d is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1731671197507/Put/seqid=0 2024-11-15T11:47:14,814 WARN [Thread-1072 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741903_1089 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:47:14,814 WARN [Thread-1072 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1235867767-172.17.0.2-1731671193006:blk_1073741903_1089 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK], DatanodeInfoWithStorage[127.0.0.1:41991,DS-566e9c75-d247-4149-ba27-165b7c1bc5e2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK]) is bad. 
2024-11-15T11:47:14,814 WARN [Thread-1072 {}] hdfs.DataStreamer(1850): Abandoning BP-1235867767-172.17.0.2-1731671193006:blk_1073741903_1089 2024-11-15T11:47:14,815 WARN [Thread-1072 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41789,DS-8f9117d5-d7ec-4c6b-9b22-28e091c54687,DISK] 2024-11-15T11:47:14,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45125 is added to blk_1073741904_1090 (size=5424) 2024-11-15T11:47:14,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41991 is added to blk_1073741904_1090 (size=5424) 2024-11-15T11:47:14,819 INFO [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/hbase/meta/1588230740/.tmp/table/ff4c91a02d3c491a86d15f7fab88808d 2024-11-15T11:47:14,825 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/hbase/meta/1588230740/.tmp/info/8b2d6062ec214d61a88b49d5c0dc5072 as hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/hbase/meta/1588230740/info/8b2d6062ec214d61a88b49d5c0dc5072 2024-11-15T11:47:14,831 INFO [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/hbase/meta/1588230740/info/8b2d6062ec214d61a88b49d5c0dc5072, entries=10, sequenceid=11, filesize=6.9 K 2024-11-15T11:47:14,832 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/hbase/meta/1588230740/.tmp/ns/3b9aa7cb81704780b2fb5e47c3de61fa as hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/hbase/meta/1588230740/ns/3b9aa7cb81704780b2fb5e47c3de61fa 2024-11-15T11:47:14,837 INFO [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/hbase/meta/1588230740/ns/3b9aa7cb81704780b2fb5e47c3de61fa, entries=2, sequenceid=11, filesize=5.0 K 2024-11-15T11:47:14,838 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/hbase/meta/1588230740/.tmp/table/ff4c91a02d3c491a86d15f7fab88808d as hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/hbase/meta/1588230740/table/ff4c91a02d3c491a86d15f7fab88808d 2024-11-15T11:47:14,844 INFO [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/hbase/meta/1588230740/table/ff4c91a02d3c491a86d15f7fab88808d, entries=2, sequenceid=11, filesize=5.3 K 2024-11-15T11:47:14,845 INFO [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 555ms, sequenceid=11, compaction requested=false 2024-11-15T11:47:14,849 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-15T11:47:14,850 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T11:47:14,850 INFO [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T11:47:14,850 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731671234290Running coprocessor pre-close hooks at 1731671234290Disabling compacts and flushes for region at 1731671234290Disabling writes for close at 1731671234290Obtaining lock to block concurrent updates at 1731671234290Preparing flush snapshotting stores in 1588230740 at 1731671234290Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1731671234291 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731671234319 (+28 ms)Flushing 1588230740/info: creating writer at 1731671234319Flushing 1588230740/info: appending metadata at 1731671234343 (+24 ms)Flushing 1588230740/info: closing flushed file at 1731671234343Flushing 1588230740/ns: creating writer at 1731671234767 (+424 ms)Flushing 1588230740/ns: appending metadata at 1731671234783 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1731671234783Flushing 1588230740/table: creating writer at 1731671234796 (+13 ms)Flushing 1588230740/table: appending metadata at 1731671234812 (+16 ms)Flushing 1588230740/table: closing flushed file at 1731671234812Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@d182704: reopening flushed file at 1731671234824 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@40f3cea6: reopening flushed file at 1731671234831 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2dbb93ac: reopening flushed file at 1731671234837 (+6 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 555ms, sequenceid=11, compaction requested=false at 1731671234845 (+8 ms)Writing region close event to WAL at 1731671234846 (+1 ms)Running coprocessor post-close hooks at 1731671234850 (+4 ms)Closed at 1731671234850 2024-11-15T11:47:14,850 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-15T11:47:14,891 INFO [RS:0;7adf9b3d9d04:46507 {}] regionserver.HRegionServer(976): stopping server 7adf9b3d9d04,46507,1731671195095; all regions closed. 
2024-11-15T11:47:14,891 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:14,892 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:14,892 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:14,892 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:14,892 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:14,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41991 is added to blk_1073741898_1083 (size=825) 2024-11-15T11:47:14,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45125 is added to blk_1073741898_1083 (size=825) 2024-11-15T11:47:15,637 INFO [regionserver/7adf9b3d9d04:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T11:47:15,790 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@15fdfea3[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45125, datanodeUuid=053e9b0a-0ad1-4ae0-be32-c63262dd71be, infoPort=45045, infoSecurePort=0, ipcPort=40387, storageInfo=lv=-57;cid=testClusterID;nsid=696059885;c=1731671193006):Failed to transfer BP-1235867767-172.17.0.2-1731671193006:blk_1073741835_1011 to 127.0.0.1:41789 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:47:15,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41991 is added to blk_1073741831_1007 (size=1321) 2024-11-15T11:47:16,420 INFO [master/7adf9b3d9d04:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-15T11:47:16,420 INFO [master/7adf9b3d9d04:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-15T11:47:16,790 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@44bcc154[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45125, datanodeUuid=053e9b0a-0ad1-4ae0-be32-c63262dd71be, infoPort=45045, infoSecurePort=0, ipcPort=40387, storageInfo=lv=-57;cid=testClusterID;nsid=696059885;c=1731671193006):Failed to transfer BP-1235867767-172.17.0.2-1731671193006:blk_1073741829_1005 to 127.0.0.1:41789 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:47:16,790 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@15fdfea3[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45125, datanodeUuid=053e9b0a-0ad1-4ae0-be32-c63262dd71be, infoPort=45045, infoSecurePort=0, ipcPort=40387, storageInfo=lv=-57;cid=testClusterID;nsid=696059885;c=1731671193006):Failed to transfer BP-1235867767-172.17.0.2-1731671193006:blk_1073741827_1003 to 127.0.0.1:41789 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T11:47:18,295 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 after 4002ms 2024-11-15T11:47:18,318 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta after 4002ms 2024-11-15T11:47:18,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41991 is added to blk_1073741828_1004 (size=1189) 2024-11-15T11:47:18,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41991 is added to blk_1073741832_1008 (size=32) 2024-11-15T11:47:18,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45125 is added to blk_1073741878_1061 (size=12911) 2024-11-15T11:47:19,293 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-15T11:47:19,295 DEBUG [RS:1;7adf9b3d9d04:39827 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/oldWALs 2024-11-15T11:47:19,295 INFO [RS:1;7adf9b3d9d04:39827 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7adf9b3d9d04%2C39827%2C1731671196494:(num 1731671196826) 2024-11-15T11:47:19,295 DEBUG [RS:1;7adf9b3d9d04:39827 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:47:19,295 INFO [RS:1;7adf9b3d9d04:39827 {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T11:47:19,295 INFO [RS:1;7adf9b3d9d04:39827 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T11:47:19,296 INFO [RS:1;7adf9b3d9d04:39827 {}] hbase.ChoreService(370): Chore service for: regionserver/7adf9b3d9d04:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-15T11:47:19,296 INFO [RS:1;7adf9b3d9d04:39827 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-15T11:47:19,296 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-15T11:47:19,296 INFO [RS:1;7adf9b3d9d04:39827 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-15T11:47:19,296 INFO [RS:1;7adf9b3d9d04:39827 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-15T11:47:19,296 INFO [RS:1;7adf9b3d9d04:39827 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T11:47:19,296 INFO [RS:1;7adf9b3d9d04:39827 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39827 2024-11-15T11:47:19,300 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at 
jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:19,318 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:19,340 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:19,340 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:19,340 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:19,340 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:19,341 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:19,351 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:19,351 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:19,354 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34851-0x1013f9b0fa70000, quorum=127.0.0.1:61276, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T11:47:19,354 INFO [RS:1;7adf9b3d9d04:39827 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T11:47:19,354 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39827-0x1013f9b0fa70002, quorum=127.0.0.1:61276, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7adf9b3d9d04,39827,1731671196494 2024-11-15T11:47:19,369 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7adf9b3d9d04,39827,1731671196494] 2024-11-15T11:47:19,379 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/7adf9b3d9d04,39827,1731671196494 already deleted, retry=false 2024-11-15T11:47:19,379 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 7adf9b3d9d04,39827,1731671196494 expired; onlineServers=1 2024-11-15T11:47:19,469 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39827-0x1013f9b0fa70002, quorum=127.0.0.1:61276, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T11:47:19,469 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39827-0x1013f9b0fa70002, quorum=127.0.0.1:61276, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T11:47:19,469 INFO [RS:1;7adf9b3d9d04:39827 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T11:47:19,469 INFO [RS:1;7adf9b3d9d04:39827 {}] regionserver.HRegionServer(1031): Exiting; stopping=7adf9b3d9d04,39827,1731671196494; zookeeper connection closed. 
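
The RecoverLeaseFSUtils warnings and the FileNotFoundException above come from HBase polling HDFS lease recovery on a WAL that has already been moved away. As a rough illustration of the underlying HDFS calls (not the actual RecoverLeaseFSUtils code), a sketch that polls recoverLease()/isFileClosed() until the file is closed or a deadline passes; the timeout and 1-second pause are illustrative values:

    import java.io.IOException;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public final class LeaseRecoverySketch {
      /** Returns true once HDFS reports the file closed; false on timeout. */
      public static boolean recoverLease(DistributedFileSystem dfs, Path wal, long timeoutMs)
          throws IOException, InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (System.currentTimeMillis() < deadline) {
          if (dfs.recoverLease(wal)) {   // true => lease released and file closed
            return true;
          }
          if (dfs.isFileClosed(wal)) {   // this is the call that fails above once the WAL is gone
            return true;
          }
          Thread.sleep(1000L);           // pause between attempts (illustrative)
        }
        return false;
      }
    }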
2024-11-15T11:47:19,470 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@45a7f13f {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@45a7f13f 2024-11-15T11:47:19,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41991 is added to blk_1073741826_1002 (size=42) 2024-11-15T11:47:19,854 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-15T11:47:19,881 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:19,882 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:19,882 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:19,882 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:19,883 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:19,883 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:19,889 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:19,892 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:19,893 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-15T11:47:19,896 DEBUG [RS:0;7adf9b3d9d04:46507 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/oldWALs 2024-11-15T11:47:19,896 INFO [RS:0;7adf9b3d9d04:46507 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7adf9b3d9d04%2C46507%2C1731671195095.meta:.meta(num 1731671234291) 2024-11-15T11:47:19,897 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:19,897 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:19,897 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:19,897 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:19,897 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:19,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41991 is added to blk_1073741892_1076 (size=16308) 2024-11-15T11:47:19,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45125 is added to blk_1073741892_1076 (size=16308) 2024-11-15T11:47:19,902 DEBUG [RS:0;7adf9b3d9d04:46507 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/oldWALs 2024-11-15T11:47:19,902 INFO [RS:0;7adf9b3d9d04:46507 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7adf9b3d9d04%2C46507%2C1731671195095:(num 1731671233764) 2024-11-15T11:47:19,903 DEBUG [RS:0;7adf9b3d9d04:46507 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:47:19,903 INFO [RS:0;7adf9b3d9d04:46507 {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T11:47:19,903 INFO [RS:0;7adf9b3d9d04:46507 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T11:47:19,903 INFO [RS:0;7adf9b3d9d04:46507 {}] hbase.ChoreService(370): Chore service for: regionserver/7adf9b3d9d04:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-15T11:47:19,903 INFO [RS:0;7adf9b3d9d04:46507 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T11:47:19,903 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-15T11:47:19,904 INFO [RS:0;7adf9b3d9d04:46507 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46507 2024-11-15T11:47:19,948 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46507-0x1013f9b0fa70001, quorum=127.0.0.1:61276, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7adf9b3d9d04,46507,1731671195095 2024-11-15T11:47:19,949 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34851-0x1013f9b0fa70000, quorum=127.0.0.1:61276, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T11:47:19,949 INFO [RS:0;7adf9b3d9d04:46507 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T11:47:19,949 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7adf9b3d9d04,46507,1731671195095] 2024-11-15T11:47:19,965 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/7adf9b3d9d04,46507,1731671195095 already deleted, retry=false 2024-11-15T11:47:19,965 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 7adf9b3d9d04,46507,1731671195095 expired; onlineServers=0 2024-11-15T11:47:19,965 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '7adf9b3d9d04,34851,1731671194886' ***** 2024-11-15T11:47:19,965 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-15T11:47:19,965 INFO [M:0;7adf9b3d9d04:34851 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T11:47:19,966 INFO [M:0;7adf9b3d9d04:34851 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T11:47:19,966 DEBUG [M:0;7adf9b3d9d04:34851 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-15T11:47:19,966 DEBUG [M:0;7adf9b3d9d04:34851 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-15T11:47:19,966 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
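
The ZKWatcher/RegionServerTracker lines above show the master reacting to NodeChildrenChanged on /hbase/rs and NodeDeleted on the region server's ephemeral znode. A bare ZooKeeper-client sketch of watching that path (not HBase's ZKWatcher, just the plain org.apache.zookeeper API the log events correspond to):

    import java.util.List;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class RsTrackerSketch {
      /** Set a one-shot watch on /hbase/rs and print membership changes. */
      public static void watchRegionServers(ZooKeeper zk) throws Exception {
        Watcher watcher = (WatchedEvent event) -> {
          // Fires as NodeChildrenChanged / NodeDeleted when an ephemeral
          // region-server znode disappears, as in the expiration above.
          System.out.println("event=" + event.getType() + " path=" + event.getPath());
        };
        List<String> servers = zk.getChildren("/hbase/rs", watcher);
        System.out.println("online region servers: " + servers);
      }
    }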
2024-11-15T11:47:19,966 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster-HFileCleaner.small.0-1731671195491 {}] cleaner.HFileCleaner(306): Exit Thread[master/7adf9b3d9d04:0:becomeActiveMaster-HFileCleaner.small.0-1731671195491,5,FailOnTimeoutGroup] 2024-11-15T11:47:19,966 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster-HFileCleaner.large.0-1731671195489 {}] cleaner.HFileCleaner(306): Exit Thread[master/7adf9b3d9d04:0:becomeActiveMaster-HFileCleaner.large.0-1731671195489,5,FailOnTimeoutGroup] 2024-11-15T11:47:19,966 INFO [M:0;7adf9b3d9d04:34851 {}] hbase.ChoreService(370): Chore service for: master/7adf9b3d9d04:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-15T11:47:19,966 INFO [M:0;7adf9b3d9d04:34851 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T11:47:19,967 DEBUG [M:0;7adf9b3d9d04:34851 {}] master.HMaster(1795): Stopping service threads 2024-11-15T11:47:19,967 INFO [M:0;7adf9b3d9d04:34851 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-15T11:47:19,967 INFO [M:0;7adf9b3d9d04:34851 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T11:47:19,967 INFO [M:0;7adf9b3d9d04:34851 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-15T11:47:19,967 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-15T11:47:19,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34851-0x1013f9b0fa70000, quorum=127.0.0.1:61276, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-15T11:47:19,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34851-0x1013f9b0fa70000, quorum=127.0.0.1:61276, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:47:19,974 DEBUG [M:0;7adf9b3d9d04:34851 {}] zookeeper.ZKUtil(347): master:34851-0x1013f9b0fa70000, quorum=127.0.0.1:61276, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-15T11:47:19,974 WARN [M:0;7adf9b3d9d04:34851 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-15T11:47:19,975 INFO [M:0;7adf9b3d9d04:34851 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/.lastflushedseqids 2024-11-15T11:47:19,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41991 is added to blk_1073741905_1091 (size=130) 2024-11-15T11:47:19,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45125 is added to blk_1073741905_1091 (size=130) 2024-11-15T11:47:19,983 INFO [M:0;7adf9b3d9d04:34851 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-15T11:47:19,983 INFO [M:0;7adf9b3d9d04:34851 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-15T11:47:19,983 DEBUG [M:0;7adf9b3d9d04:34851 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T11:47:19,983 INFO [M:0;7adf9b3d9d04:34851 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:47:19,983 DEBUG [M:0;7adf9b3d9d04:34851 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:47:19,983 DEBUG [M:0;7adf9b3d9d04:34851 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T11:47:19,983 DEBUG [M:0;7adf9b3d9d04:34851 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:47:19,984 INFO [M:0;7adf9b3d9d04:34851 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.25 KB heapSize=29.49 KB 2024-11-15T11:47:20,003 DEBUG [M:0;7adf9b3d9d04:34851 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e1b009e4c1614d4ab7bf28cef6382095 is 82, key is hbase:meta,,1/info:regioninfo/1731671196286/Put/seqid=0 2024-11-15T11:47:20,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45125 is added to blk_1073741906_1092 (size=5672) 2024-11-15T11:47:20,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41991 is added to blk_1073741906_1092 (size=5672) 2024-11-15T11:47:20,009 INFO [M:0;7adf9b3d9d04:34851 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e1b009e4c1614d4ab7bf28cef6382095 2024-11-15T11:47:20,031 DEBUG [M:0;7adf9b3d9d04:34851 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0843326e2bed470abb42d491391beac8 is 774, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731671197513/Put/seqid=0 2024-11-15T11:47:20,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41991 is added to blk_1073741907_1093 (size=6255) 2024-11-15T11:47:20,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45125 is added to blk_1073741907_1093 (size=6255) 2024-11-15T11:47:20,049 INFO [M:0;7adf9b3d9d04:34851 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0843326e2bed470abb42d491391beac8 2024-11-15T11:47:20,056 INFO [M:0;7adf9b3d9d04:34851 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 0843326e2bed470abb42d491391beac8 2024-11-15T11:47:20,057 INFO [RS:0;7adf9b3d9d04:46507 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T11:47:20,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46507-0x1013f9b0fa70001, quorum=127.0.0.1:61276, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T11:47:20,057 INFO [RS:0;7adf9b3d9d04:46507 {}] 
regionserver.HRegionServer(1031): Exiting; stopping=7adf9b3d9d04,46507,1731671195095; zookeeper connection closed. 2024-11-15T11:47:20,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46507-0x1013f9b0fa70001, quorum=127.0.0.1:61276, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T11:47:20,057 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3d2c4041 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3d2c4041 2024-11-15T11:47:20,058 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-15T11:47:20,079 DEBUG [M:0;7adf9b3d9d04:34851 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/70435357481a45c18890e31eb1664e48 is 69, key is 7adf9b3d9d04,39827,1731671196494/rs:state/1731671196629/Put/seqid=0 2024-11-15T11:47:20,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41991 is added to blk_1073741908_1094 (size=5224) 2024-11-15T11:47:20,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45125 is added to blk_1073741908_1094 (size=5224) 2024-11-15T11:47:20,090 INFO [M:0;7adf9b3d9d04:34851 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/70435357481a45c18890e31eb1664e48 2024-11-15T11:47:20,114 DEBUG [M:0;7adf9b3d9d04:34851 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e500a21d758d427bb00cf9622f78b3e8 is 52, key is load_balancer_on/state:d/1731671196476/Put/seqid=0 2024-11-15T11:47:20,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41991 is added to blk_1073741909_1095 (size=5056) 2024-11-15T11:47:20,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45125 is added to blk_1073741909_1095 (size=5056) 2024-11-15T11:47:20,121 INFO [M:0;7adf9b3d9d04:34851 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e500a21d758d427bb00cf9622f78b3e8 2024-11-15T11:47:20,128 DEBUG [M:0;7adf9b3d9d04:34851 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e1b009e4c1614d4ab7bf28cef6382095 as hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e1b009e4c1614d4ab7bf28cef6382095 2024-11-15T11:47:20,135 INFO [M:0;7adf9b3d9d04:34851 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e1b009e4c1614d4ab7bf28cef6382095, entries=8, sequenceid=60, filesize=5.5 K 2024-11-15T11:47:20,137 DEBUG [M:0;7adf9b3d9d04:34851 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0843326e2bed470abb42d491391beac8 as hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/0843326e2bed470abb42d491391beac8 2024-11-15T11:47:20,144 INFO [M:0;7adf9b3d9d04:34851 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 0843326e2bed470abb42d491391beac8 2024-11-15T11:47:20,144 INFO [M:0;7adf9b3d9d04:34851 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/0843326e2bed470abb42d491391beac8, entries=6, sequenceid=60, filesize=6.1 K 2024-11-15T11:47:20,145 DEBUG [M:0;7adf9b3d9d04:34851 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/70435357481a45c18890e31eb1664e48 as hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/70435357481a45c18890e31eb1664e48 2024-11-15T11:47:20,152 INFO [M:0;7adf9b3d9d04:34851 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/70435357481a45c18890e31eb1664e48, entries=2, sequenceid=60, filesize=5.1 K 2024-11-15T11:47:20,153 DEBUG [M:0;7adf9b3d9d04:34851 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e500a21d758d427bb00cf9622f78b3e8 as hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/e500a21d758d427bb00cf9622f78b3e8 2024-11-15T11:47:20,160 INFO [M:0;7adf9b3d9d04:34851 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/e500a21d758d427bb00cf9622f78b3e8, entries=1, sequenceid=60, filesize=4.9 K 2024-11-15T11:47:20,161 INFO [M:0;7adf9b3d9d04:34851 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 178ms, sequenceid=60, compaction requested=false 2024-11-15T11:47:20,163 INFO [M:0;7adf9b3d9d04:34851 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-15T11:47:20,163 DEBUG [M:0;7adf9b3d9d04:34851 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731671239983Disabling compacts and flushes for region at 1731671239983Disabling writes for close at 1731671239983Obtaining lock to block concurrent updates at 1731671239984 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731671239984Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23805, getHeapSize=30136, getOffHeapSize=0, getCellsCount=71 at 1731671239984Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731671239985 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731671239985Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731671240002 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731671240002Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731671240015 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731671240030 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731671240030Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731671240057 (+27 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731671240079 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731671240079Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731671240097 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731671240113 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731671240113Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@24692db7: reopening flushed file at 1731671240127 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5f29edb6: reopening flushed file at 1731671240135 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5585c27c: reopening flushed file at 1731671240144 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4e996f51: reopening flushed file at 1731671240152 (+8 ms)Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 178ms, sequenceid=60, compaction requested=false at 1731671240161 (+9 ms)Writing region close event to WAL at 1731671240163 (+2 ms)Closed at 1731671240163 2024-11-15T11:47:20,163 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:20,163 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:20,164 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:20,164 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:20,164 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:20,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41991 is added to blk_1073741890_1073 (size=1045) 2024-11-15T11:47:20,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45125 is added to blk_1073741890_1073 (size=1045) 2024-11-15T11:47:20,302 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:20,319 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:20,810 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@1e6d58b {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1235867767-172.17.0.2-1731671193006:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:43663,null,null]) java.net.ConnectException: Call From 7adf9b3d9d04/172.17.0.2 to localhost:44705 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-15T11:47:21,016 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-15T11:47:21,017 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T11:47:21,017 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-15T11:47:21,017 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-15T11:47:21,303 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:21,320 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T11:47:21,512 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/MasterData/WALs/7adf9b3d9d04,34851,1731671194886/7adf9b3d9d04%2C34851%2C1731671194886.1731671195275 to hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/MasterData/oldWALs/7adf9b3d9d04%2C34851%2C1731671194886.1731671195275 2024-11-15T11:47:21,516 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/MasterData/oldWALs/7adf9b3d9d04%2C34851%2C1731671194886.1731671195275 to hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/oldWALs/7adf9b3d9d04%2C34851%2C1731671194886.1731671195275$masterlocalwal$ 2024-11-15T11:47:21,517 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-15T11:47:21,517 INFO [M:0;7adf9b3d9d04:34851 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-15T11:47:21,517 INFO [M:0;7adf9b3d9d04:34851 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34851 2024-11-15T11:47:21,517 INFO [M:0;7adf9b3d9d04:34851 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T11:47:21,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34851-0x1013f9b0fa70000, quorum=127.0.0.1:61276, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T11:47:21,673 INFO [M:0;7adf9b3d9d04:34851 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T11:47:21,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34851-0x1013f9b0fa70000, quorum=127.0.0.1:61276, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T11:47:21,677 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@575d7e2f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T11:47:21,677 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@39eaf0e6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T11:47:21,677 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T11:47:21,677 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@54f27916{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T11:47:21,678 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@22c6c03b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/hadoop.log.dir/,STOPPED} 2024-11-15T11:47:21,679 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-15T11:47:21,679 WARN [BP-1235867767-172.17.0.2-1731671193006 heartbeating to localhost/127.0.0.1:40139 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T11:47:21,679 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T11:47:21,679 WARN [BP-1235867767-172.17.0.2-1731671193006 heartbeating to localhost/127.0.0.1:40139 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1235867767-172.17.0.2-1731671193006 (Datanode Uuid 053e9b0a-0ad1-4ae0-be32-c63262dd71be) service to localhost/127.0.0.1:40139 2024-11-15T11:47:21,678 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@79624b85 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1235867767-172.17.0.2-1731671193006:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:43663,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:44705 , LocalHost:localPort 7adf9b3d9d04/172.17.0.2:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-15T11:47:21,679 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@79624b85 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1235867767-172.17.0.2-1731671193006:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:45125,null,null], DatanodeInfoWithStorage[127.0.0.1:43663,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: No block pool offer service for bpid=BP-1235867767-172.17.0.2-1731671193006 2024-11-15T11:47:21,679 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@79624b85 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1235867767-172.17.0.2-1731671193006:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:43663,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1235867767-172.17.0.2-1731671193006 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
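
The block-recovery trace above mentions RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS). For reference, that policy shape is built with Hadoop's RetryPolicies factory; a minimal sketch with the same parameters as reported in the trace:

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.io.retry.RetryPolicies;
    import org.apache.hadoop.io.retry.RetryPolicy;

    public class RecoveryRetrySketch {
      // Same shape as the policy named in the stack trace above.
      public static RetryPolicy blockRecoveryRetryPolicy() {
        return RetryPolicies.retryUpToMaximumCountWithFixedSleep(10, 1000, TimeUnit.MILLISECONDS);
      }
    }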
2024-11-15T11:47:21,679 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@79624b85 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1235867767-172.17.0.2-1731671193006:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:45125,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1235867767-172.17.0.2-1731671193006 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:47:21,679 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@79624b85 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1235867767-172.17.0.2-1731671193006:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:43663,null,null], DatanodeInfoWithStorage[127.0.0.1:45125,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-1235867767-172.17.0.2-1731671193006:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:43663,null,null], DatanodeInfoWithStorage[127.0.0.1:45125,null,null]] 2024-11-15T11:47:21,680 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data3/current/BP-1235867767-172.17.0.2-1731671193006 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T11:47:21,680 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data4/current/BP-1235867767-172.17.0.2-1731671193006 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T11:47:21,680 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T11:47:21,682 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@638f230f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T11:47:21,682 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@69bcca{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T11:47:21,682 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T11:47:21,682 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@136e75a1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T11:47:21,683 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@3e1ad43e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/hadoop.log.dir/,STOPPED} 2024-11-15T11:47:21,685 WARN [BP-1235867767-172.17.0.2-1731671193006 heartbeating to localhost/127.0.0.1:40139 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T11:47:21,685 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-15T11:47:21,685 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T11:47:21,685 WARN [BP-1235867767-172.17.0.2-1731671193006 heartbeating to localhost/127.0.0.1:40139 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1235867767-172.17.0.2-1731671193006 (Datanode Uuid 185575bb-4573-42fd-888e-d597e7952c60) service to localhost/127.0.0.1:40139 2024-11-15T11:47:21,686 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data9/current/BP-1235867767-172.17.0.2-1731671193006 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T11:47:21,686 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/cluster_448bea67-4860-6258-b639-40f04a92d116/data/data10/current/BP-1235867767-172.17.0.2-1731671193006 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T11:47:21,686 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T11:47:21,694 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@94a50db{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T11:47:21,695 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@38184680{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T11:47:21,695 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T11:47:21,696 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1aa07d80{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T11:47:21,696 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@475f8022{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/hadoop.log.dir/,STOPPED} 2024-11-15T11:47:21,704 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-15T11:47:21,751 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-15T11:47:21,761 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath 
Thread=154 (was 77) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:40139 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-18-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40139 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:40139 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:40139 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:39177 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:40139 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40139 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:40139 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:39177 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:40139 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$899/0x00007f2790bf46e8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:40139 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$899/0x00007f2790bf46e8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40139 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40139 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=450 (was 405) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=264 (was 244) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 12), AvailableMemoryMB=10792 (was 11652) 2024-11-15T11:47:21,768 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=154, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=264, ProcessCount=11, AvailableMemoryMB=10792 2024-11-15T11:47:21,769 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-15T11:47:21,769 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/hadoop.log.dir so I do NOT create it in target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0 2024-11-15T11:47:21,769 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575a95a3-0dd9-f259-0e71-9b222ce45c22/hadoop.tmp.dir so I do NOT create it in target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0 2024-11-15T11:47:21,769 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/cluster_a2bb061e-96ca-9eeb-27a3-0e3501365403, deleteOnExit=true 2024-11-15T11:47:21,769 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-15T11:47:21,769 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/test.cache.data in system properties and HBase conf 2024-11-15T11:47:21,769 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/hadoop.tmp.dir in system properties and HBase conf 2024-11-15T11:47:21,770 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/hadoop.log.dir in system properties and HBase conf 2024-11-15T11:47:21,770 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-15T11:47:21,770 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-15T11:47:21,770 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-15T11:47:21,770 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-15T11:47:21,771 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-15T11:47:21,771 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-15T11:47:21,771 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-15T11:47:21,771 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T11:47:21,771 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-15T11:47:21,771 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-15T11:47:21,771 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T11:47:21,771 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T11:47:21,771 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-15T11:47:21,772 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/nfs.dump.dir in system properties and HBase conf 2024-11-15T11:47:21,772 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/java.io.tmpdir in system properties and HBase conf 2024-11-15T11:47:21,772 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T11:47:21,772 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-15T11:47:21,772 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-15T11:47:21,787 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-15T11:47:22,067 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T11:47:22,074 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T11:47:22,076 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T11:47:22,076 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T11:47:22,076 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T11:47:22,077 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T11:47:22,077 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@624b3986{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/hadoop.log.dir/,AVAILABLE} 2024-11-15T11:47:22,078 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@179d1ca6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T11:47:22,217 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@50fbcccf{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/java.io.tmpdir/jetty-localhost-43703-hadoop-hdfs-3_4_1-tests_jar-_-any-15741939244702338563/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T11:47:22,218 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6699fa8b{HTTP/1.1, (http/1.1)}{localhost:43703} 2024-11-15T11:47:22,218 INFO [Time-limited test {}] server.Server(415): Started @157972ms 2024-11-15T11:47:22,232 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-15T11:47:22,303 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:22,320 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:22,464 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T11:47:22,468 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T11:47:22,471 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T11:47:22,471 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T11:47:22,471 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T11:47:22,472 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5cb54bd6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/hadoop.log.dir/,AVAILABLE} 2024-11-15T11:47:22,472 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@667c8bc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T11:47:22,584 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3ee5ac4e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/java.io.tmpdir/jetty-localhost-45413-hadoop-hdfs-3_4_1-tests_jar-_-any-15382202841875397190/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T11:47:22,584 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@51031d29{HTTP/1.1, (http/1.1)}{localhost:45413} 2024-11-15T11:47:22,584 INFO [Time-limited test {}] server.Server(415): Started @158338ms 2024-11-15T11:47:22,586 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T11:47:22,614 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T11:47:22,618 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T11:47:22,619 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T11:47:22,619 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T11:47:22,619 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T11:47:22,619 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5a20a16b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/hadoop.log.dir/,AVAILABLE} 2024-11-15T11:47:22,620 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@40375d59{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T11:47:22,729 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@27e9dc43{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/java.io.tmpdir/jetty-localhost-45595-hadoop-hdfs-3_4_1-tests_jar-_-any-8178770225454559358/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T11:47:22,729 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@292bdfb3{HTTP/1.1, (http/1.1)}{localhost:45595} 2024-11-15T11:47:22,729 INFO [Time-limited test {}] server.Server(415): Started @158483ms 2024-11-15T11:47:22,731 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T11:47:23,304 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:23,321 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T11:47:23,411 WARN [Thread-1208 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/cluster_a2bb061e-96ca-9eeb-27a3-0e3501365403/data/data1/current/BP-1151629165-172.17.0.2-1731671241800/current, will proceed with Du for space computation calculation, 2024-11-15T11:47:23,415 WARN [Thread-1209 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/cluster_a2bb061e-96ca-9eeb-27a3-0e3501365403/data/data2/current/BP-1151629165-172.17.0.2-1731671241800/current, will proceed with Du for space computation calculation, 2024-11-15T11:47:23,446 WARN [Thread-1172 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-15T11:47:23,451 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2e82bf3e2f96c416 with lease ID 0x4044d25632831919: Processing first storage report for DS-edbaecff-ceeb-4755-86c7-e19dc7813ddc from datanode DatanodeRegistration(127.0.0.1:36437, datanodeUuid=7f3e0bea-514b-4e1b-8723-3bd65959348a, infoPort=35161, infoSecurePort=0, ipcPort=38779, storageInfo=lv=-57;cid=testClusterID;nsid=1495507183;c=1731671241800) 2024-11-15T11:47:23,451 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2e82bf3e2f96c416 with lease ID 0x4044d25632831919: from storage DS-edbaecff-ceeb-4755-86c7-e19dc7813ddc node DatanodeRegistration(127.0.0.1:36437, datanodeUuid=7f3e0bea-514b-4e1b-8723-3bd65959348a, infoPort=35161, infoSecurePort=0, ipcPort=38779, storageInfo=lv=-57;cid=testClusterID;nsid=1495507183;c=1731671241800), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T11:47:23,451 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2e82bf3e2f96c416 with lease ID 0x4044d25632831919: Processing first storage report for DS-3deb292d-a09d-462b-90d9-73e7cc3b4198 from datanode DatanodeRegistration(127.0.0.1:36437, datanodeUuid=7f3e0bea-514b-4e1b-8723-3bd65959348a, infoPort=35161, infoSecurePort=0, ipcPort=38779, storageInfo=lv=-57;cid=testClusterID;nsid=1495507183;c=1731671241800) 2024-11-15T11:47:23,451 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2e82bf3e2f96c416 with lease ID 0x4044d25632831919: from storage DS-3deb292d-a09d-462b-90d9-73e7cc3b4198 node DatanodeRegistration(127.0.0.1:36437, datanodeUuid=7f3e0bea-514b-4e1b-8723-3bd65959348a, infoPort=35161, infoSecurePort=0, ipcPort=38779, storageInfo=lv=-57;cid=testClusterID;nsid=1495507183;c=1731671241800), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-15T11:47:23,550 WARN [Thread-1220 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/cluster_a2bb061e-96ca-9eeb-27a3-0e3501365403/data/data4/current/BP-1151629165-172.17.0.2-1731671241800/current, will proceed with Du for space computation calculation, 2024-11-15T11:47:23,550 WARN [Thread-1219 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/cluster_a2bb061e-96ca-9eeb-27a3-0e3501365403/data/data3/current/BP-1151629165-172.17.0.2-1731671241800/current, will proceed with Du for space computation calculation, 2024-11-15T11:47:23,570 WARN [Thread-1195 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-15T11:47:23,572 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3e8974d3b14260d7 with lease ID 0x4044d2563283191a: Processing first storage report for DS-6d72e768-557c-42c6-8747-33329f0812f3 from datanode DatanodeRegistration(127.0.0.1:36565, datanodeUuid=258b2754-7809-4dff-9d5a-1909f1bd1595, infoPort=44955, infoSecurePort=0, ipcPort=45283, storageInfo=lv=-57;cid=testClusterID;nsid=1495507183;c=1731671241800) 2024-11-15T11:47:23,572 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3e8974d3b14260d7 with lease ID 0x4044d2563283191a: from storage DS-6d72e768-557c-42c6-8747-33329f0812f3 node DatanodeRegistration(127.0.0.1:36565, datanodeUuid=258b2754-7809-4dff-9d5a-1909f1bd1595, infoPort=44955, infoSecurePort=0, ipcPort=45283, storageInfo=lv=-57;cid=testClusterID;nsid=1495507183;c=1731671241800), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T11:47:23,573 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3e8974d3b14260d7 with lease ID 0x4044d2563283191a: Processing first storage report for DS-4a97975f-413b-4aa5-b37c-dd95d6a3dfa9 from datanode DatanodeRegistration(127.0.0.1:36565, datanodeUuid=258b2754-7809-4dff-9d5a-1909f1bd1595, infoPort=44955, infoSecurePort=0, ipcPort=45283, storageInfo=lv=-57;cid=testClusterID;nsid=1495507183;c=1731671241800) 2024-11-15T11:47:23,573 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3e8974d3b14260d7 with lease ID 0x4044d2563283191a: from storage DS-4a97975f-413b-4aa5-b37c-dd95d6a3dfa9 node DatanodeRegistration(127.0.0.1:36565, datanodeUuid=258b2754-7809-4dff-9d5a-1909f1bd1595, infoPort=44955, infoSecurePort=0, ipcPort=45283, storageInfo=lv=-57;cid=testClusterID;nsid=1495507183;c=1731671241800), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T11:47:23,582 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0 2024-11-15T11:47:23,584 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/cluster_a2bb061e-96ca-9eeb-27a3-0e3501365403/zookeeper_0, clientPort=53987, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/cluster_a2bb061e-96ca-9eeb-27a3-0e3501365403/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/cluster_a2bb061e-96ca-9eeb-27a3-0e3501365403/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-15T11:47:23,585 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=53987 2024-11-15T11:47:23,586 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:47:23,587 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:47:23,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36437 is added to blk_1073741825_1001 (size=7) 2024-11-15T11:47:23,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36565 is added to blk_1073741825_1001 (size=7) 2024-11-15T11:47:23,597 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c with version=8 2024-11-15T11:47:23,597 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/hbase-staging 2024-11-15T11:47:23,600 INFO [Time-limited test {}] client.ConnectionUtils(128): master/7adf9b3d9d04:0 server-side Connection retries=45 2024-11-15T11:47:23,600 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T11:47:23,600 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T11:47:23,600 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T11:47:23,601 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T11:47:23,601 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T11:47:23,601 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-15T11:47:23,601 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T11:47:23,602 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45917 2024-11-15T11:47:23,604 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45917 connecting to ZooKeeper ensemble=127.0.0.1:53987 2024-11-15T11:47:23,657 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:459170x0, quorum=127.0.0.1:53987, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null 2024-11-15T11:47:23,657 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45917-0x1013f9bce050000 connected 2024-11-15T11:47:23,732 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:47:23,734 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:47:23,736 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45917-0x1013f9bce050000, quorum=127.0.0.1:53987, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T11:47:23,737 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c, hbase.cluster.distributed=false 2024-11-15T11:47:23,739 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45917-0x1013f9bce050000, quorum=127.0.0.1:53987, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T11:47:23,741 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45917 2024-11-15T11:47:23,742 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45917 2024-11-15T11:47:23,747 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45917 2024-11-15T11:47:23,749 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45917 2024-11-15T11:47:23,749 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45917 2024-11-15T11:47:23,773 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7adf9b3d9d04:0 server-side Connection retries=45 2024-11-15T11:47:23,774 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T11:47:23,774 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T11:47:23,774 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T11:47:23,774 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T11:47:23,774 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T11:47:23,774 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-15T11:47:23,774 INFO 
[Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T11:47:23,775 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35267 2024-11-15T11:47:23,778 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35267 connecting to ZooKeeper ensemble=127.0.0.1:53987 2024-11-15T11:47:23,779 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:47:23,782 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:47:23,804 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:352670x0, quorum=127.0.0.1:53987, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T11:47:23,804 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:352670x0, quorum=127.0.0.1:53987, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T11:47:23,805 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-15T11:47:23,805 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35267-0x1013f9bce050001 connected 2024-11-15T11:47:23,807 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-15T11:47:23,808 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35267-0x1013f9bce050001, quorum=127.0.0.1:53987, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-15T11:47:23,809 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35267-0x1013f9bce050001, quorum=127.0.0.1:53987, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T11:47:23,816 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35267 2024-11-15T11:47:23,818 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35267 2024-11-15T11:47:23,819 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35267 2024-11-15T11:47:23,819 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35267 2024-11-15T11:47:23,820 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35267 2024-11-15T11:47:23,833 DEBUG [M:0;7adf9b3d9d04:45917 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7adf9b3d9d04:45917 2024-11-15T11:47:23,834 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/7adf9b3d9d04,45917,1731671243600 2024-11-15T11:47:23,845 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45917-0x1013f9bce050000, quorum=127.0.0.1:53987, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase/backup-masters 2024-11-15T11:47:23,845 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35267-0x1013f9bce050001, quorum=127.0.0.1:53987, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T11:47:23,846 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45917-0x1013f9bce050000, quorum=127.0.0.1:53987, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/7adf9b3d9d04,45917,1731671243600 2024-11-15T11:47:23,856 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45917-0x1013f9bce050000, quorum=127.0.0.1:53987, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:47:23,856 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35267-0x1013f9bce050001, quorum=127.0.0.1:53987, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-15T11:47:23,856 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35267-0x1013f9bce050001, quorum=127.0.0.1:53987, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:47:23,857 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45917-0x1013f9bce050000, quorum=127.0.0.1:53987, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-15T11:47:23,863 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7adf9b3d9d04,45917,1731671243600 from backup master directory 2024-11-15T11:47:23,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35267-0x1013f9bce050001, quorum=127.0.0.1:53987, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T11:47:23,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45917-0x1013f9bce050000, quorum=127.0.0.1:53987, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/7adf9b3d9d04,45917,1731671243600 2024-11-15T11:47:23,873 WARN [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
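The ZKWatcher DEBUG entries above trace the standard ZooKeeper watch-and-notify cycle behind master registration: a watch is set on a znode (here /hbase/master and /hbase/backup-masters), and the client is later called back with NodeCreated / NodeDeleted / NodeChildrenChanged events. Below is a minimal, self-contained sketch of that pattern using the plain org.apache.zookeeper client API; it only illustrates the event flow visible in the log, it is not HBase's ZKWatcher, and the connect string is a placeholder rather than a value from this test run.

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Minimal illustration of the watch/notify cycle visible in the ZKWatcher DEBUG lines.
// The connect string below is a placeholder, not taken from this test run.
public class ZkWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30000, event -> {
      // Session-level events arrive with a null path, e.g. state=SyncConnected.
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
    });
    connected.await();

    // Setting a watch on a znode that may not exist yet mirrors
    // "Set watcher on znode that does not yet exist, /hbase/master".
    zk.exists("/hbase/master", event ->
        // One-shot callback: NodeCreated / NodeDeleted / NodeDataChanged.
        System.out.println("event=" + event.getType() + " path=" + event.getPath()));

    zk.close();
  }
}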
2024-11-15T11:47:23,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45917-0x1013f9bce050000, quorum=127.0.0.1:53987, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T11:47:23,873 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7adf9b3d9d04,45917,1731671243600 2024-11-15T11:47:23,879 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/hbase.id] with ID: 57a44f17-f231-4780-b859-1973850757c9 2024-11-15T11:47:23,879 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/.tmp/hbase.id 2024-11-15T11:47:23,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36437 is added to blk_1073741826_1002 (size=42) 2024-11-15T11:47:23,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36565 is added to blk_1073741826_1002 (size=42) 2024-11-15T11:47:23,890 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/.tmp/hbase.id]:[hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/hbase.id] 2024-11-15T11:47:23,909 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:47:23,909 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-15T11:47:23,911 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
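The two util.FSUtils entries above show the usual crash-safe publish pattern on HDFS: the cluster ID is first written under a .tmp location and then moved to its final name, so a reader never sees a partially written hbase.id. The following is a hedged sketch of that write-then-rename idiom with the stock org.apache.hadoop.fs.FileSystem API; the paths and configuration are illustrative placeholders, not the exact code HBase runs here.

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Write-to-temp-then-rename, as reflected in the "Write the cluster ID file to a
// temporary location" / "Move the temporary cluster ID file to its target location" entries.
// Paths below are placeholders for illustration only.
public class PublishFileSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration(); // picks up fs.defaultFS (e.g. an hdfs:// URI)
    FileSystem fs = FileSystem.get(conf);

    Path tmp = new Path("/user/jenkins/test-data/.tmp/hbase.id");
    Path target = new Path("/user/jenkins/test-data/hbase.id");

    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("57a44f17-f231-4780-b859-1973850757c9".getBytes(StandardCharsets.UTF_8));
    }
    // Within a single HDFS namespace, rename() is atomic, so the target either
    // does not exist yet or contains the complete contents.
    if (!fs.rename(tmp, target)) {
      throw new java.io.IOException("rename " + tmp + " -> " + target + " failed");
    }
  }
}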
2024-11-15T11:47:23,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45917-0x1013f9bce050000, quorum=127.0.0.1:53987, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:47:23,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35267-0x1013f9bce050001, quorum=127.0.0.1:53987, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:47:23,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36565 is added to blk_1073741827_1003 (size=196) 2024-11-15T11:47:23,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36437 is added to blk_1073741827_1003 (size=196) 2024-11-15T11:47:23,939 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T11:47:23,940 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-15T11:47:23,940 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T11:47:23,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36565 is added to blk_1073741828_1004 (size=1189) 2024-11-15T11:47:23,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36437 is added to blk_1073741828_1004 (size=1189) 2024-11-15T11:47:23,951 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/MasterData/data/master/store 2024-11-15T11:47:23,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36437 is added to blk_1073741829_1005 (size=34) 2024-11-15T11:47:23,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36565 is added to blk_1073741829_1005 (size=34) 2024-11-15T11:47:23,965 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:47:23,965 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T11:47:23,965 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:47:23,966 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:47:23,966 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T11:47:23,966 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:47:23,966 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
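The master:store descriptor printed above (column families info, proc, rs and state, with their block sizes, bloom filters and encodings) is the kind of schema normally assembled through the public descriptor builders. Purely as an illustration of how one such family definition maps onto the HBase client API, here is a sketch that builds the 'info' family with the attributes shown in the log; the table name is arbitrary and this is not the code MasterRegion uses internally.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: an 'info' column family with the attributes the log prints for master:store
// (VERSIONS => 3, IN_MEMORY => true, BLOCKSIZE => 8KB, BLOOMFILTER => ROWCOL,
//  DATA_BLOCK_ENCODING => ROW_INDEX_V1). The table name is a placeholder for illustration.
public class DescriptorSketch {
  public static TableDescriptor build() {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .setBloomFilterType(BloomType.ROWCOL)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .build();

    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example", "store"))
        .setColumnFamily(info)
        .build();
  }
}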
2024-11-15T11:47:23,966 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731671243965Disabling compacts and flushes for region at 1731671243965Disabling writes for close at 1731671243966 (+1 ms)Writing region close event to WAL at 1731671243966Closed at 1731671243966 2024-11-15T11:47:23,967 WARN [master/7adf9b3d9d04:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/MasterData/data/master/store/.initializing 2024-11-15T11:47:23,967 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/MasterData/WALs/7adf9b3d9d04,45917,1731671243600 2024-11-15T11:47:23,970 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7adf9b3d9d04%2C45917%2C1731671243600, suffix=, logDir=hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/MasterData/WALs/7adf9b3d9d04,45917,1731671243600, archiveDir=hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/MasterData/oldWALs, maxLogs=10 2024-11-15T11:47:23,971 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7adf9b3d9d04%2C45917%2C1731671243600.1731671243971 2024-11-15T11:47:23,988 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/MasterData/WALs/7adf9b3d9d04,45917,1731671243600/7adf9b3d9d04%2C45917%2C1731671243600.1731671243971 2024-11-15T11:47:23,993 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35161:35161),(127.0.0.1/127.0.0.1:44955:44955)] 2024-11-15T11:47:24,011 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-15T11:47:24,011 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:47:24,011 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:47:24,011 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:47:24,024 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:47:24,026 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-15T11:47:24,026 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:47:24,027 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:47:24,027 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:47:24,029 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-15T11:47:24,029 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:47:24,030 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:47:24,030 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:47:24,032 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-15T11:47:24,032 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:47:24,033 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:47:24,033 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:47:24,035 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-15T11:47:24,035 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:47:24,035 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:47:24,036 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:47:24,036 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:47:24,037 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:47:24,038 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:47:24,038 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:47:24,039 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-15T11:47:24,041 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:47:24,044 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T11:47:24,045 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=728365, jitterRate=-0.07383705675601959}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-15T11:47:24,046 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731671244011Initializing all the Stores at 1731671244012 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731671244012Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731671244024 (+12 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731671244024Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731671244024Cleaning up temporary data from old regions at 1731671244039 (+15 ms)Region opened successfully at 1731671244046 (+7 ms) 2024-11-15T11:47:24,046 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-15T11:47:24,050 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@736fb4d7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7adf9b3d9d04/172.17.0.2:0 2024-11-15T11:47:24,051 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-15T11:47:24,052 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-15T11:47:24,052 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-15T11:47:24,052 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-15T11:47:24,053 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-15T11:47:24,053 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-15T11:47:24,053 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-15T11:47:24,057 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-15T11:47:24,058 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45917-0x1013f9bce050000, quorum=127.0.0.1:53987, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-15T11:47:24,078 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-15T11:47:24,079 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-15T11:47:24,079 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45917-0x1013f9bce050000, quorum=127.0.0.1:53987, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-15T11:47:24,089 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-15T11:47:24,090 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-15T11:47:24,091 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45917-0x1013f9bce050000, quorum=127.0.0.1:53987, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-15T11:47:24,098 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-15T11:47:24,099 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45917-0x1013f9bce050000, quorum=127.0.0.1:53987, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-15T11:47:24,106 DEBUG 
[master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-15T11:47:24,109 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45917-0x1013f9bce050000, quorum=127.0.0.1:53987, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-15T11:47:24,120 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-15T11:47:24,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45917-0x1013f9bce050000, quorum=127.0.0.1:53987, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T11:47:24,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35267-0x1013f9bce050001, quorum=127.0.0.1:53987, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T11:47:24,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45917-0x1013f9bce050000, quorum=127.0.0.1:53987, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:47:24,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35267-0x1013f9bce050001, quorum=127.0.0.1:53987, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:47:24,132 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=7adf9b3d9d04,45917,1731671243600, sessionid=0x1013f9bce050000, setting cluster-up flag (Was=false) 2024-11-15T11:47:24,148 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35267-0x1013f9bce050001, quorum=127.0.0.1:53987, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:47:24,148 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45917-0x1013f9bce050000, quorum=127.0.0.1:53987, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:47:24,173 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-15T11:47:24,174 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7adf9b3d9d04,45917,1731671243600 2024-11-15T11:47:24,190 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45917-0x1013f9bce050000, quorum=127.0.0.1:53987, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:47:24,190 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35267-0x1013f9bce050001, quorum=127.0.0.1:53987, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:47:24,215 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-15T11:47:24,216 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7adf9b3d9d04,45917,1731671243600 2024-11-15T11:47:24,217 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-15T11:47:24,219 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-15T11:47:24,220 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-15T11:47:24,220 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-15T11:47:24,220 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7adf9b3d9d04,45917,1731671243600 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-15T11:47:24,221 INFO [RS:0;7adf9b3d9d04:35267 {}] regionserver.HRegionServer(746): ClusterId : 57a44f17-f231-4780-b859-1973850757c9 2024-11-15T11:47:24,221 DEBUG [RS:0;7adf9b3d9d04:35267 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-15T11:47:24,222 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7adf9b3d9d04:0, corePoolSize=5, maxPoolSize=5 2024-11-15T11:47:24,222 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7adf9b3d9d04:0, corePoolSize=5, maxPoolSize=5 2024-11-15T11:47:24,222 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7adf9b3d9d04:0, corePoolSize=5, maxPoolSize=5 2024-11-15T11:47:24,222 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7adf9b3d9d04:0, corePoolSize=5, maxPoolSize=5 2024-11-15T11:47:24,222 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7adf9b3d9d04:0, corePoolSize=10, maxPoolSize=10 2024-11-15T11:47:24,222 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:47:24,222 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service 
name=MASTER_MERGE_OPERATIONS-master/7adf9b3d9d04:0, corePoolSize=2, maxPoolSize=2 2024-11-15T11:47:24,222 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:47:24,232 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T11:47:24,232 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-15T11:47:24,233 DEBUG [RS:0;7adf9b3d9d04:35267 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-15T11:47:24,233 DEBUG [RS:0;7adf9b3d9d04:35267 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-15T11:47:24,234 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:47:24,234 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-15T11:47:24,241 DEBUG [RS:0;7adf9b3d9d04:35267 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-15T11:47:24,241 DEBUG [RS:0;7adf9b3d9d04:35267 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3fa1405b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7adf9b3d9d04/172.17.0.2:0 2024-11-15T11:47:24,255 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731671274255 2024-11-15T11:47:24,255 INFO 
[master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-15T11:47:24,255 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-15T11:47:24,255 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-15T11:47:24,255 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-15T11:47:24,255 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-15T11:47:24,255 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-15T11:47:24,258 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-15T11:47:24,267 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-15T11:47:24,267 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-15T11:47:24,267 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-15T11:47:24,271 DEBUG [RS:0;7adf9b3d9d04:35267 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;7adf9b3d9d04:35267 2024-11-15T11:47:24,271 INFO [RS:0;7adf9b3d9d04:35267 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-15T11:47:24,271 INFO [RS:0;7adf9b3d9d04:35267 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-15T11:47:24,271 DEBUG [RS:0;7adf9b3d9d04:35267 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-15T11:47:24,272 INFO [RS:0;7adf9b3d9d04:35267 {}] regionserver.HRegionServer(2659): reportForDuty to master=7adf9b3d9d04,45917,1731671243600 with port=35267, startcode=1731671243773 2024-11-15T11:47:24,273 DEBUG [RS:0;7adf9b3d9d04:35267 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-15T11:47:24,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36565 is added to blk_1073741831_1007 (size=1321) 2024-11-15T11:47:24,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36437 is added to blk_1073741831_1007 (size=1321) 2024-11-15T11:47:24,286 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-15T11:47:24,286 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c 2024-11-15T11:47:24,286 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-15T11:47:24,287 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-15T11:47:24,297 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7adf9b3d9d04:0:becomeActiveMaster-HFileCleaner.large.0-1731671244287,5,FailOnTimeoutGroup] 2024-11-15T11:47:24,297 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small 
files=Thread[master/7adf9b3d9d04:0:becomeActiveMaster-HFileCleaner.small.0-1731671244297,5,FailOnTimeoutGroup]
2024-11-15T11:47:24,297 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-11-15T11:47:24,297 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it.
2024-11-15T11:47:24,298 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled.
2024-11-15T11:47:24,298 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled.
2024-11-15T11:47:24,299 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42457, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService
2024-11-15T11:47:24,300 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45917 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 7adf9b3d9d04,35267,1731671243773
2024-11-15T11:47:24,300 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45917 {}] master.ServerManager(517): Registering regionserver=7adf9b3d9d04,35267,1731671243773
2024-11-15T11:47:24,303 DEBUG [RS:0;7adf9b3d9d04:35267 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c
2024-11-15T11:47:24,303 DEBUG [RS:0;7adf9b3d9d04:35267 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37741
2024-11-15T11:47:24,303 DEBUG [RS:0;7adf9b3d9d04:35267 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1
2024-11-15T11:47:24,305 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-15T11:47:24,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36437 is added to blk_1073741832_1008 (size=32)
2024-11-15T11:47:24,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36565 is added to blk_1073741832_1008 (size=32)
2024-11-15T11:47:24,317 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-15T11:47:24,322 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-15T11:47:24,323 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-11-15T11:47:24,324 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-11-15T11:47:24,324 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-15T11:47:24,325 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-15T11:47:24,325 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740
2024-11-15T11:47:24,327 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns
2024-11-15T11:47:24,327 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-15T11:47:24,327 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-15T11:47:24,327 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region
1588230740 2024-11-15T11:47:24,329 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T11:47:24,329 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:47:24,330 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:47:24,330 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T11:47:24,331 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45917-0x1013f9bce050000, quorum=127.0.0.1:53987, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T11:47:24,332 DEBUG [RS:0;7adf9b3d9d04:35267 {}] zookeeper.ZKUtil(111): regionserver:35267-0x1013f9bce050001, quorum=127.0.0.1:53987, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7adf9b3d9d04,35267,1731671243773 2024-11-15T11:47:24,332 WARN [RS:0;7adf9b3d9d04:35267 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-15T11:47:24,332 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T11:47:24,332 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:47:24,332 INFO [RS:0;7adf9b3d9d04:35267 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T11:47:24,332 DEBUG [RS:0;7adf9b3d9d04:35267 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773 2024-11-15T11:47:24,332 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7adf9b3d9d04,35267,1731671243773] 2024-11-15T11:47:24,333 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:47:24,333 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T11:47:24,334 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/data/hbase/meta/1588230740 2024-11-15T11:47:24,334 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/data/hbase/meta/1588230740 2024-11-15T11:47:24,335 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T11:47:24,335 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T11:47:24,336 INFO [RS:0;7adf9b3d9d04:35267 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-15T11:47:24,336 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-15T11:47:24,337 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T11:47:24,338 INFO [RS:0;7adf9b3d9d04:35267 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-15T11:47:24,338 INFO [RS:0;7adf9b3d9d04:35267 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-15T11:47:24,338 INFO [RS:0;7adf9b3d9d04:35267 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T11:47:24,342 INFO [RS:0;7adf9b3d9d04:35267 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-15T11:47:24,343 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T11:47:24,344 INFO [RS:0;7adf9b3d9d04:35267 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-15T11:47:24,344 INFO [RS:0;7adf9b3d9d04:35267 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-15T11:47:24,344 DEBUG [RS:0;7adf9b3d9d04:35267 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:47:24,344 DEBUG [RS:0;7adf9b3d9d04:35267 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:47:24,344 DEBUG [RS:0;7adf9b3d9d04:35267 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:47:24,344 DEBUG [RS:0;7adf9b3d9d04:35267 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:47:24,344 DEBUG [RS:0;7adf9b3d9d04:35267 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:47:24,344 DEBUG [RS:0;7adf9b3d9d04:35267 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7adf9b3d9d04:0, corePoolSize=2, maxPoolSize=2 2024-11-15T11:47:24,344 DEBUG [RS:0;7adf9b3d9d04:35267 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:47:24,344 DEBUG [RS:0;7adf9b3d9d04:35267 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:47:24,344 DEBUG [RS:0;7adf9b3d9d04:35267 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:47:24,345 DEBUG [RS:0;7adf9b3d9d04:35267 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:47:24,345 DEBUG [RS:0;7adf9b3d9d04:35267 {}] executor.ExecutorService(95): Starting executor service 
name=RS_SWITCH_RPC_THROTTLE-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:47:24,345 DEBUG [RS:0;7adf9b3d9d04:35267 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:47:24,345 DEBUG [RS:0;7adf9b3d9d04:35267 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7adf9b3d9d04:0, corePoolSize=3, maxPoolSize=3 2024-11-15T11:47:24,345 DEBUG [RS:0;7adf9b3d9d04:35267 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7adf9b3d9d04:0, corePoolSize=3, maxPoolSize=3 2024-11-15T11:47:24,346 INFO [RS:0;7adf9b3d9d04:35267 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T11:47:24,347 INFO [RS:0;7adf9b3d9d04:35267 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T11:47:24,347 INFO [RS:0;7adf9b3d9d04:35267 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T11:47:24,347 INFO [RS:0;7adf9b3d9d04:35267 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-15T11:47:24,347 INFO [RS:0;7adf9b3d9d04:35267 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-15T11:47:24,347 INFO [RS:0;7adf9b3d9d04:35267 {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,35267,1731671243773-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T11:47:24,349 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=785436, jitterRate=-0.0012668073177337646}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T11:47:24,350 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731671244317Initializing all the Stores at 1731671244318 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731671244319 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731671244322 (+3 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731671244322Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 
'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731671244322Cleaning up temporary data from old regions at 1731671244335 (+13 ms)Region opened successfully at 1731671244350 (+15 ms) 2024-11-15T11:47:24,351 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T11:47:24,351 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T11:47:24,351 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T11:47:24,351 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T11:47:24,351 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T11:47:24,351 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T11:47:24,351 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731671244350Disabling compacts and flushes for region at 1731671244350Disabling writes for close at 1731671244351 (+1 ms)Writing region close event to WAL at 1731671244351Closed at 1731671244351 2024-11-15T11:47:24,353 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T11:47:24,353 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-15T11:47:24,353 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-15T11:47:24,355 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T11:47:24,356 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-15T11:47:24,372 INFO [RS:0;7adf9b3d9d04:35267 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-15T11:47:24,373 INFO [RS:0;7adf9b3d9d04:35267 {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,35267,1731671243773-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T11:47:24,373 INFO [RS:0;7adf9b3d9d04:35267 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T11:47:24,373 INFO [RS:0;7adf9b3d9d04:35267 {}] regionserver.Replication(171): 7adf9b3d9d04,35267,1731671243773 started 2024-11-15T11:47:24,392 INFO [RS:0;7adf9b3d9d04:35267 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-15T11:47:24,392 INFO [RS:0;7adf9b3d9d04:35267 {}] regionserver.HRegionServer(1482): Serving as 7adf9b3d9d04,35267,1731671243773, RpcServer on 7adf9b3d9d04/172.17.0.2:35267, sessionid=0x1013f9bce050001 2024-11-15T11:47:24,393 DEBUG [RS:0;7adf9b3d9d04:35267 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-15T11:47:24,393 DEBUG [RS:0;7adf9b3d9d04:35267 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7adf9b3d9d04,35267,1731671243773 2024-11-15T11:47:24,393 DEBUG [RS:0;7adf9b3d9d04:35267 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7adf9b3d9d04,35267,1731671243773' 2024-11-15T11:47:24,393 DEBUG [RS:0;7adf9b3d9d04:35267 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-15T11:47:24,393 DEBUG [RS:0;7adf9b3d9d04:35267 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-15T11:47:24,394 DEBUG [RS:0;7adf9b3d9d04:35267 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-15T11:47:24,394 DEBUG [RS:0;7adf9b3d9d04:35267 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-15T11:47:24,394 DEBUG [RS:0;7adf9b3d9d04:35267 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7adf9b3d9d04,35267,1731671243773 2024-11-15T11:47:24,394 DEBUG [RS:0;7adf9b3d9d04:35267 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7adf9b3d9d04,35267,1731671243773' 2024-11-15T11:47:24,394 DEBUG [RS:0;7adf9b3d9d04:35267 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-15T11:47:24,394 DEBUG [RS:0;7adf9b3d9d04:35267 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-15T11:47:24,395 DEBUG [RS:0;7adf9b3d9d04:35267 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-15T11:47:24,395 INFO [RS:0;7adf9b3d9d04:35267 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-15T11:47:24,395 INFO [RS:0;7adf9b3d9d04:35267 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-15T11:47:24,497 INFO [RS:0;7adf9b3d9d04:35267 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7adf9b3d9d04%2C35267%2C1731671243773, suffix=, logDir=hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773, archiveDir=hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/oldWALs, maxLogs=32 2024-11-15T11:47:24,498 INFO [RS:0;7adf9b3d9d04:35267 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7adf9b3d9d04%2C35267%2C1731671243773.1731671244498 2024-11-15T11:47:24,505 INFO [RS:0;7adf9b3d9d04:35267 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671244498 2024-11-15T11:47:24,506 WARN [7adf9b3d9d04:45917 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-11-15T11:47:24,506 DEBUG [RS:0;7adf9b3d9d04:35267 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35161:35161),(127.0.0.1/127.0.0.1:44955:44955)] 2024-11-15T11:47:24,756 DEBUG [7adf9b3d9d04:45917 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-15T11:47:24,757 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7adf9b3d9d04,35267,1731671243773 2024-11-15T11:47:24,759 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7adf9b3d9d04,35267,1731671243773, state=OPENING 2024-11-15T11:47:24,823 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-15T11:47:24,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35267-0x1013f9bce050001, quorum=127.0.0.1:53987, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:47:24,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45917-0x1013f9bce050000, quorum=127.0.0.1:53987, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:47:24,832 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T11:47:24,832 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T11:47:24,833 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=7adf9b3d9d04,35267,1731671243773}] 2024-11-15T11:47:24,834 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T11:47:24,987 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-15T11:47:24,989 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49177, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-15T11:47:24,993 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-15T11:47:24,994 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T11:47:24,996 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7adf9b3d9d04%2C35267%2C1731671243773.meta, suffix=.meta, logDir=hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773, archiveDir=hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/oldWALs, maxLogs=32 2024-11-15T11:47:24,996 INFO 
[RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 7adf9b3d9d04%2C35267%2C1731671243773.meta.1731671244996.meta 2024-11-15T11:47:25,008 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.meta.1731671244996.meta 2024-11-15T11:47:25,027 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44955:44955),(127.0.0.1/127.0.0.1:35161:35161)] 2024-11-15T11:47:25,038 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-15T11:47:25,038 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-15T11:47:25,038 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-15T11:47:25,039 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-15T11:47:25,039 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-15T11:47:25,039 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:47:25,039 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-15T11:47:25,039 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-15T11:47:25,043 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T11:47:25,044 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T11:47:25,044 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:47:25,045 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:47:25,045 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T11:47:25,046 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T11:47:25,046 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:47:25,047 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:47:25,047 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T11:47:25,049 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T11:47:25,049 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:47:25,049 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): 
Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:47:25,050 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T11:47:25,051 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T11:47:25,051 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:47:25,052 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:47:25,052 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T11:47:25,053 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/data/hbase/meta/1588230740 2024-11-15T11:47:25,055 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/data/hbase/meta/1588230740 2024-11-15T11:47:25,056 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T11:47:25,056 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T11:47:25,057 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-15T11:47:25,060 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T11:47:25,062 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=744779, jitterRate=-0.052965715527534485}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T11:47:25,062 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-15T11:47:25,063 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731671245039Writing region info on filesystem at 1731671245039Initializing all the Stores at 1731671245040 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731671245040Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731671245042 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731671245042Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731671245042Cleaning up temporary data from old regions at 1731671245056 (+14 ms)Running coprocessor post-open hooks at 1731671245062 (+6 ms)Region opened successfully at 1731671245063 (+1 ms) 2024-11-15T11:47:25,064 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731671244987 2024-11-15T11:47:25,067 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-15T11:47:25,067 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-15T11:47:25,068 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=7adf9b3d9d04,35267,1731671243773 2024-11-15T11:47:25,070 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7adf9b3d9d04,35267,1731671243773, state=OPEN 2024-11-15T11:47:25,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35267-0x1013f9bce050001, quorum=127.0.0.1:53987, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T11:47:25,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45917-0x1013f9bce050000, quorum=127.0.0.1:53987, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T11:47:25,149 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=7adf9b3d9d04,35267,1731671243773 2024-11-15T11:47:25,149 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T11:47:25,149 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T11:47:25,152 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-15T11:47:25,153 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=7adf9b3d9d04,35267,1731671243773 in 317 msec 2024-11-15T11:47:25,155 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-15T11:47:25,156 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 799 msec 2024-11-15T11:47:25,156 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T11:47:25,157 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-15T11:47:25,158 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:47:25,158 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7adf9b3d9d04,35267,1731671243773, seqNum=-1] 2024-11-15T11:47:25,159 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:47:25,160 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41749, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T11:47:25,167 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 947 msec 2024-11-15T11:47:25,167 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731671245167, completionTime=-1 2024-11-15T11:47:25,167 INFO 
[master/7adf9b3d9d04:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-15T11:47:25,167 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-15T11:47:25,170 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-15T11:47:25,170 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731671305170 2024-11-15T11:47:25,170 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731671365170 2024-11-15T11:47:25,170 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-15T11:47:25,170 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,45917,1731671243600-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T11:47:25,170 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,45917,1731671243600-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T11:47:25,170 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,45917,1731671243600-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T11:47:25,171 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7adf9b3d9d04:45917, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T11:47:25,171 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-15T11:47:25,171 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-15T11:47:25,173 DEBUG [master/7adf9b3d9d04:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-15T11:47:25,175 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.302sec 2024-11-15T11:47:25,176 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-15T11:47:25,176 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-15T11:47:25,176 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-15T11:47:25,176 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
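Several of the entries above register ScheduledChore instances with fixed periods (ClusterStatusChore every 60000 ms, BalancerChore and RegionNormalizerChore every 300000 ms, and so on). As a rough analogy only, not HBase's ChoreService, the same fixed-rate pattern looks like this with the standard library:

    // Analogy only: fixed-rate scheduling similar in spirit to the chores logged above.
    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public final class ChoreSchedulingSketch {
        public static void main(String[] args) {
            ScheduledExecutorService chorePool = Executors.newScheduledThreadPool(1);
            // Mirror the BalancerChore period of 300000 ms reported in the log.
            chorePool.scheduleAtFixedRate(
                () -> System.out.println("balancer-like chore tick"),
                0L, 300_000L, TimeUnit.MILLISECONDS);
        }
    }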
2024-11-15T11:47:25,176 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-15T11:47:25,176 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,45917,1731671243600-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T11:47:25,176 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,45917,1731671243600-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-15T11:47:25,179 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-15T11:47:25,179 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-15T11:47:25,179 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,45917,1731671243600-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T11:47:25,223 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22dbe5ea, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:47:25,223 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 7adf9b3d9d04,45917,-1 for getting cluster id 2024-11-15T11:47:25,223 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T11:47:25,225 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '57a44f17-f231-4780-b859-1973850757c9' 2024-11-15T11:47:25,225 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T11:47:25,226 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "57a44f17-f231-4780-b859-1973850757c9" 2024-11-15T11:47:25,226 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@324afde6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:47:25,226 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [7adf9b3d9d04,45917,-1] 2024-11-15T11:47:25,226 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T11:47:25,227 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:47:25,228 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51956, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T11:47:25,229 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@355539f9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:47:25,230 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:47:25,231 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7adf9b3d9d04,35267,1731671243773, seqNum=-1] 2024-11-15T11:47:25,232 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:47:25,234 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47300, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T11:47:25,236 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=7adf9b3d9d04,45917,1731671243600 2024-11-15T11:47:25,236 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:47:25,239 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-15T11:47:25,239 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-15T11:47:25,239 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-15T11:47:25,240 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-15T11:47:25,241 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 7adf9b3d9d04,45917,1731671243600 2024-11-15T11:47:25,241 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@137b4ea6 2024-11-15T11:47:25,241 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-15T11:47:25,244 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51970, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-15T11:47:25,245 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-15T11:47:25,245 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
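The two TableDescriptorChecker warnings above are expected here: the test runs with hbase.hregion.max.filesize at 786432 and hbase.hregion.memstore.flush.size at 8192 so that log rolls and flushes happen quickly. How TestLogRolling actually sets these values is not shown in this log; a plausible sketch of such a test configuration would be:

    // Sketch only: one plausible way a test could shrink these thresholds.
    // Whether TestLogRolling sets them exactly like this is an assumption.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class SmallRegionConfigSketch {
        public static Configuration tinyRegionConf() {
            Configuration conf = HBaseConfiguration.create();
            // Values match the ones reported by TableDescriptorChecker above.
            conf.setLong("hbase.hregion.max.filesize", 786432L);      // ~768 KB
            conf.setLong("hbase.hregion.memstore.flush.size", 8192L); // 8 KB
            return conf;
        }
    }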
2024-11-15T11:47:25,245 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T11:47:25,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-15T11:47:25,248 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-15T11:47:25,248 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:47:25,248 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-15T11:47:25,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-15T11:47:25,250 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-15T11:47:25,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36565 is added to blk_1073741835_1011 (size=395) 2024-11-15T11:47:25,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36437 is added to blk_1073741835_1011 (size=395) 2024-11-15T11:47:25,279 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => d51577c3ee67012e0e09c8becd49aa32, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731671245245.d51577c3ee67012e0e09c8becd49aa32.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c 2024-11-15T11:47:25,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36437 is added to blk_1073741836_1012 (size=78) 2024-11-15T11:47:25,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36565 is added to blk_1073741836_1012 (size=78) 2024-11-15T11:47:25,292 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731671245245.d51577c3ee67012e0e09c8becd49aa32.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:47:25,292 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing d51577c3ee67012e0e09c8becd49aa32, disabling compactions & flushes 2024-11-15T11:47:25,292 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731671245245.d51577c3ee67012e0e09c8becd49aa32. 2024-11-15T11:47:25,293 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731671245245.d51577c3ee67012e0e09c8becd49aa32. 2024-11-15T11:47:25,293 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731671245245.d51577c3ee67012e0e09c8becd49aa32. after waiting 0 ms 2024-11-15T11:47:25,293 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731671245245.d51577c3ee67012e0e09c8becd49aa32. 2024-11-15T11:47:25,293 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731671245245.d51577c3ee67012e0e09c8becd49aa32. 2024-11-15T11:47:25,293 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for d51577c3ee67012e0e09c8becd49aa32: Waiting for close lock at 1731671245292Disabling compacts and flushes for region at 1731671245292Disabling writes for close at 1731671245293 (+1 ms)Writing region close event to WAL at 1731671245293Closed at 1731671245293 2024-11-15T11:47:25,294 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-15T11:47:25,295 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1731671245245.d51577c3ee67012e0e09c8becd49aa32.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1731671245295"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731671245295"}]},"ts":"1731671245295"} 2024-11-15T11:47:25,298 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
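The CreateTableProcedure above lays out 'TestLogRolling-testLogRollOnPipelineRestart' with a single 'info' family (VERSIONS => '1', BLOOMFILTER => 'ROW'). A hedged client-side sketch of an equivalent create request follows; the connection handling is an assumption, not taken from the test itself:

    // Sketch of a client-side create for the table described above; not a claim
    // about how TestLogRolling itself creates it.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class CreateTestTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableName name = TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart");
                admin.createTable(TableDescriptorBuilder.newBuilder(name)
                    .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                        .setMaxVersions(1)                 // VERSIONS => '1'
                        .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
                        .build())
                    .build());
            }
        }
    }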
2024-11-15T11:47:25,300 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-15T11:47:25,300 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731671245300"}]},"ts":"1731671245300"} 2024-11-15T11:47:25,303 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-15T11:47:25,303 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=d51577c3ee67012e0e09c8becd49aa32, ASSIGN}] 2024-11-15T11:47:25,305 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=d51577c3ee67012e0e09c8becd49aa32, ASSIGN 2024-11-15T11:47:25,306 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:25,307 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=d51577c3ee67012e0e09c8becd49aa32, ASSIGN; state=OFFLINE, location=7adf9b3d9d04,35267,1731671243773; forceNewPlan=false, retain=false 2024-11-15T11:47:25,323 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T11:47:25,457 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=d51577c3ee67012e0e09c8becd49aa32, regionState=OPENING, regionLocation=7adf9b3d9d04,35267,1731671243773 2024-11-15T11:47:25,461 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=d51577c3ee67012e0e09c8becd49aa32, ASSIGN because future has completed 2024-11-15T11:47:25,461 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure d51577c3ee67012e0e09c8becd49aa32, server=7adf9b3d9d04,35267,1731671243773}] 2024-11-15T11:47:25,620 INFO [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1731671245245.d51577c3ee67012e0e09c8becd49aa32. 2024-11-15T11:47:25,621 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => d51577c3ee67012e0e09c8becd49aa32, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731671245245.d51577c3ee67012e0e09c8becd49aa32.', STARTKEY => '', ENDKEY => ''} 2024-11-15T11:47:25,621 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart d51577c3ee67012e0e09c8becd49aa32 2024-11-15T11:47:25,621 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731671245245.d51577c3ee67012e0e09c8becd49aa32.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:47:25,621 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for d51577c3ee67012e0e09c8becd49aa32 2024-11-15T11:47:25,622 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for d51577c3ee67012e0e09c8becd49aa32 2024-11-15T11:47:25,623 INFO [StoreOpener-d51577c3ee67012e0e09c8becd49aa32-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region d51577c3ee67012e0e09c8becd49aa32 2024-11-15T11:47:25,625 INFO [StoreOpener-d51577c3ee67012e0e09c8becd49aa32-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d51577c3ee67012e0e09c8becd49aa32 columnFamilyName info 2024-11-15T11:47:25,626 DEBUG [StoreOpener-d51577c3ee67012e0e09c8becd49aa32-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:47:25,626 INFO [StoreOpener-d51577c3ee67012e0e09c8becd49aa32-1 {}] regionserver.HStore(327): Store=d51577c3ee67012e0e09c8becd49aa32/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:47:25,626 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for d51577c3ee67012e0e09c8becd49aa32 2024-11-15T11:47:25,627 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/data/default/TestLogRolling-testLogRollOnPipelineRestart/d51577c3ee67012e0e09c8becd49aa32 2024-11-15T11:47:25,628 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/data/default/TestLogRolling-testLogRollOnPipelineRestart/d51577c3ee67012e0e09c8becd49aa32 2024-11-15T11:47:25,628 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for d51577c3ee67012e0e09c8becd49aa32 2024-11-15T11:47:25,628 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for d51577c3ee67012e0e09c8becd49aa32 2024-11-15T11:47:25,630 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for d51577c3ee67012e0e09c8becd49aa32 2024-11-15T11:47:25,632 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/data/default/TestLogRolling-testLogRollOnPipelineRestart/d51577c3ee67012e0e09c8becd49aa32/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T11:47:25,633 INFO [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened d51577c3ee67012e0e09c8becd49aa32; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=752484, jitterRate=-0.043167561292648315}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T11:47:25,633 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d51577c3ee67012e0e09c8becd49aa32 2024-11-15T11:47:25,634 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for d51577c3ee67012e0e09c8becd49aa32: Running coprocessor pre-open hook at 1731671245622Writing region info on filesystem at 1731671245622Initializing all the Stores at 1731671245623 (+1 
ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731671245623Cleaning up temporary data from old regions at 1731671245628 (+5 ms)Running coprocessor post-open hooks at 1731671245633 (+5 ms)Region opened successfully at 1731671245634 (+1 ms) 2024-11-15T11:47:25,635 INFO [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1731671245245.d51577c3ee67012e0e09c8becd49aa32., pid=6, masterSystemTime=1731671245615 2024-11-15T11:47:25,637 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnPipelineRestart,,1731671245245.d51577c3ee67012e0e09c8becd49aa32. 2024-11-15T11:47:25,637 INFO [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1731671245245.d51577c3ee67012e0e09c8becd49aa32. 2024-11-15T11:47:25,638 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=d51577c3ee67012e0e09c8becd49aa32, regionState=OPEN, openSeqNum=2, regionLocation=7adf9b3d9d04,35267,1731671243773 2024-11-15T11:47:25,640 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure d51577c3ee67012e0e09c8becd49aa32, server=7adf9b3d9d04,35267,1731671243773 because future has completed 2024-11-15T11:47:25,644 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-15T11:47:25,644 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure d51577c3ee67012e0e09c8becd49aa32, server=7adf9b3d9d04,35267,1731671243773 in 181 msec 2024-11-15T11:47:25,647 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-15T11:47:25,647 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=d51577c3ee67012e0e09c8becd49aa32, ASSIGN in 341 msec 2024-11-15T11:47:25,648 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-15T11:47:25,649 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731671245648"}]},"ts":"1731671245648"} 2024-11-15T11:47:25,651 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-15T11:47:25,652 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; 
CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-15T11:47:25,654 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 407 msec 2024-11-15T11:47:26,307 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:26,324 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:26,519 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-15T11:47:26,542 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:26,542 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:26,542 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:26,543 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:26,543 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:26,544 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:26,548 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:26,548 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:26,548 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:26,551 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:27,307 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:27,324 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:28,308 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:28,325 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:29,309 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:29,326 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:30,309 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:30,326 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:30,336 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-15T11:47:30,337 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-15T11:47:31,017 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-15T11:47:31,017 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-15T11:47:31,018 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-15T11:47:31,018 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-15T11:47:31,019 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T11:47:31,019 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-15T11:47:31,019 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the 
MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-15T11:47:31,019 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-15T11:47:31,311 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:31,327 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:32,311 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:32,328 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:33,312 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:33,328 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:34,313 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:34,329 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:35,314 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:35,330 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T11:47:35,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-15T11:47:35,342 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-11-15T11:47:35,342 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-11-15T11:47:35,346 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-11-15T11:47:35,346 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1731671245245.d51577c3ee67012e0e09c8becd49aa32. 2024-11-15T11:47:35,352 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1731671245245.d51577c3ee67012e0e09c8becd49aa32., hostname=7adf9b3d9d04,35267,1731671243773, seqNum=2] 2024-11-15T11:47:36,315 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:36,330 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:37,316 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:37,331 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:37,355 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671244498 2024-11-15T11:47:37,356 WARN [ResponseProcessor for block BP-1151629165-172.17.0.2-1731671241800:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1151629165-172.17.0.2-1731671241800:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:47:37,356 WARN [ResponseProcessor for block BP-1151629165-172.17.0.2-1731671241800:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1151629165-172.17.0.2-1731671241800:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-1151629165-172.17.0.2-1731671241800:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:36565,DS-6d72e768-557c-42c6-8747-33329f0812f3,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:47:37,356 WARN [DataStreamer for file /user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.meta.1731671244996.meta block BP-1151629165-172.17.0.2-1731671241800:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1151629165-172.17.0.2-1731671241800:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36565,DS-6d72e768-557c-42c6-8747-33329f0812f3,DISK], DatanodeInfoWithStorage[127.0.0.1:36437,DS-edbaecff-ceeb-4755-86c7-e19dc7813ddc,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36565,DS-6d72e768-557c-42c6-8747-33329f0812f3,DISK]) is bad. 2024-11-15T11:47:37,356 WARN [DataStreamer for file /user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671244498 block BP-1151629165-172.17.0.2-1731671241800:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1151629165-172.17.0.2-1731671241800:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36437,DS-edbaecff-ceeb-4755-86c7-e19dc7813ddc,DISK], DatanodeInfoWithStorage[127.0.0.1:36565,DS-6d72e768-557c-42c6-8747-33329f0812f3,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36565,DS-6d72e768-557c-42c6-8747-33329f0812f3,DISK]) is bad. 
2024-11-15T11:47:37,356 WARN [ResponseProcessor for block BP-1151629165-172.17.0.2-1731671241800:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1151629165-172.17.0.2-1731671241800:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-1151629165-172.17.0.2-1731671241800:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:36565,DS-6d72e768-557c-42c6-8747-33329f0812f3,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:47:37,356 WARN [PacketResponder: BP-1151629165-172.17.0.2-1731671241800:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:36565] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:47:37,356 WARN [DataStreamer for file /user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/MasterData/WALs/7adf9b3d9d04,45917,1731671243600/7adf9b3d9d04%2C45917%2C1731671243600.1731671243971 block BP-1151629165-172.17.0.2-1731671241800:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1151629165-172.17.0.2-1731671241800:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36437,DS-edbaecff-ceeb-4755-86c7-e19dc7813ddc,DISK], DatanodeInfoWithStorage[127.0.0.1:36565,DS-6d72e768-557c-42c6-8747-33329f0812f3,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36565,DS-6d72e768-557c-42c6-8747-33329f0812f3,DISK]) is bad. 2024-11-15T11:47:37,356 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-639172724_22 at /127.0.0.1:50174 [Receiving block BP-1151629165-172.17.0.2-1731671241800:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:36565:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50174 dst: /127.0.0.1:36565 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] 
at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:47:37,357 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-639172724_22 at /127.0.0.1:48114 [Receiving block BP-1151629165-172.17.0.2-1731671241800:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:36437:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48114 dst: /127.0.0.1:36437 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:47:37,357 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-639172724_22 at /127.0.0.1:48104 [Receiving block BP-1151629165-172.17.0.2-1731671241800:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:36437:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48104 dst: /127.0.0.1:36437 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:47:37,357 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-639172724_22 at /127.0.0.1:50160 [Receiving block BP-1151629165-172.17.0.2-1731671241800:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:36565:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50160 dst: /127.0.0.1:36565 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:47:37,357 WARN [PacketResponder: BP-1151629165-172.17.0.2-1731671241800:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:36565] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:47:37,358 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2013189632_22 at /127.0.0.1:48074 [Receiving block BP-1151629165-172.17.0.2-1731671241800:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:36437:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48074 dst: /127.0.0.1:36437 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:47:37,358 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2013189632_22 at /127.0.0.1:50126 [Receiving block BP-1151629165-172.17.0.2-1731671241800:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:36565:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50126 dst: /127.0.0.1:36565 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:47:37,394 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@27e9dc43{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T11:47:37,394 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@292bdfb3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T11:47:37,394 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T11:47:37,395 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@40375d59{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T11:47:37,395 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5a20a16b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/hadoop.log.dir/,STOPPED} 2024-11-15T11:47:37,396 WARN [BP-1151629165-172.17.0.2-1731671241800 heartbeating to localhost/127.0.0.1:37741 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T11:47:37,396 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-15T11:47:37,397 WARN [BP-1151629165-172.17.0.2-1731671241800 heartbeating to localhost/127.0.0.1:37741 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1151629165-172.17.0.2-1731671241800 (Datanode Uuid 258b2754-7809-4dff-9d5a-1909f1bd1595) service to localhost/127.0.0.1:37741 2024-11-15T11:47:37,397 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T11:47:37,397 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/cluster_a2bb061e-96ca-9eeb-27a3-0e3501365403/data/data3/current/BP-1151629165-172.17.0.2-1731671241800 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T11:47:37,397 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/cluster_a2bb061e-96ca-9eeb-27a3-0e3501365403/data/data4/current/BP-1151629165-172.17.0.2-1731671241800 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T11:47:37,398 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T11:47:37,407 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T11:47:37,411 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T11:47:37,412 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T11:47:37,412 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T11:47:37,412 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T11:47:37,412 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6299f50b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/hadoop.log.dir/,AVAILABLE} 2024-11-15T11:47:37,413 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@22d0d1b0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T11:47:37,517 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@570cb725{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/java.io.tmpdir/jetty-localhost-44087-hadoop-hdfs-3_4_1-tests_jar-_-any-10973336490942941621/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T11:47:37,518 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@35808fda{HTTP/1.1, 
(http/1.1)}{localhost:44087} 2024-11-15T11:47:37,518 INFO [Time-limited test {}] server.Server(415): Started @173272ms 2024-11-15T11:47:37,519 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T11:47:37,539 WARN [ResponseProcessor for block BP-1151629165-172.17.0.2-1731671241800:blk_1073741830_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1151629165-172.17.0.2-1731671241800:blk_1073741830_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:47:37,539 WARN [ResponseProcessor for block BP-1151629165-172.17.0.2-1731671241800:blk_1073741834_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1151629165-172.17.0.2-1731671241800:blk_1073741834_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:47:37,539 WARN [ResponseProcessor for block BP-1151629165-172.17.0.2-1731671241800:blk_1073741833_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1151629165-172.17.0.2-1731671241800:blk_1073741833_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:47:37,540 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2013189632_22 at /127.0.0.1:58838 [Receiving block BP-1151629165-172.17.0.2-1731671241800:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:36437:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58838 dst: /127.0.0.1:36437 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] 
at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:47:37,540 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-639172724_22 at /127.0.0.1:58816 [Receiving block BP-1151629165-172.17.0.2-1731671241800:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:36437:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58816 dst: /127.0.0.1:36437 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:47:37,540 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-639172724_22 at /127.0.0.1:58832 [Receiving block BP-1151629165-172.17.0.2-1731671241800:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:36437:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58832 dst: /127.0.0.1:36437 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T11:47:37,541 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3ee5ac4e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T11:47:37,542 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@51031d29{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T11:47:37,542 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T11:47:37,542 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@667c8bc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T11:47:37,542 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5cb54bd6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/hadoop.log.dir/,STOPPED} 2024-11-15T11:47:37,544 WARN [BP-1151629165-172.17.0.2-1731671241800 heartbeating to localhost/127.0.0.1:37741 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T11:47:37,544 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-15T11:47:37,544 WARN [BP-1151629165-172.17.0.2-1731671241800 heartbeating to localhost/127.0.0.1:37741 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1151629165-172.17.0.2-1731671241800 (Datanode Uuid 7f3e0bea-514b-4e1b-8723-3bd65959348a) service to localhost/127.0.0.1:37741 2024-11-15T11:47:37,544 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T11:47:37,544 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/cluster_a2bb061e-96ca-9eeb-27a3-0e3501365403/data/data1/current/BP-1151629165-172.17.0.2-1731671241800 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T11:47:37,545 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/cluster_a2bb061e-96ca-9eeb-27a3-0e3501365403/data/data2/current/BP-1151629165-172.17.0.2-1731671241800 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T11:47:37,545 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T11:47:37,567 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T11:47:37,571 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T11:47:37,571 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T11:47:37,571 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T11:47:37,571 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T11:47:37,572 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5d802677{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/hadoop.log.dir/,AVAILABLE} 2024-11-15T11:47:37,572 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7b559376{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T11:47:37,676 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@929fb8a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/java.io.tmpdir/jetty-localhost-36021-hadoop-hdfs-3_4_1-tests_jar-_-any-16020594291368863248/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T11:47:37,676 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@39f8899d{HTTP/1.1, (http/1.1)}{localhost:36021} 2024-11-15T11:47:37,676 INFO [Time-limited test {}] server.Server(415): Started @173430ms 2024-11-15T11:47:37,677 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T11:47:37,983 WARN [Thread-1343 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-15T11:47:37,985 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x59a6ca657a114ff2 with lease ID 0x4044d2563283191b: from storage DS-6d72e768-557c-42c6-8747-33329f0812f3 node DatanodeRegistration(127.0.0.1:37531, datanodeUuid=258b2754-7809-4dff-9d5a-1909f1bd1595, infoPort=42495, infoSecurePort=0, ipcPort=45929, storageInfo=lv=-57;cid=testClusterID;nsid=1495507183;c=1731671241800), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-15T11:47:37,986 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x59a6ca657a114ff2 with lease ID 0x4044d2563283191b: from storage DS-4a97975f-413b-4aa5-b37c-dd95d6a3dfa9 node DatanodeRegistration(127.0.0.1:37531, datanodeUuid=258b2754-7809-4dff-9d5a-1909f1bd1595, infoPort=42495, infoSecurePort=0, ipcPort=45929, storageInfo=lv=-57;cid=testClusterID;nsid=1495507183;c=1731671241800), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T11:47:38,108 WARN [Thread-1363 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-15T11:47:38,110 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3a6af603f8d04ce2 with lease ID 0x4044d2563283191c: from storage DS-edbaecff-ceeb-4755-86c7-e19dc7813ddc node DatanodeRegistration(127.0.0.1:44115, datanodeUuid=7f3e0bea-514b-4e1b-8723-3bd65959348a, infoPort=34561, infoSecurePort=0, ipcPort=41129, storageInfo=lv=-57;cid=testClusterID;nsid=1495507183;c=1731671241800), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-15T11:47:38,111 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3a6af603f8d04ce2 with lease ID 0x4044d2563283191c: from storage DS-3deb292d-a09d-462b-90d9-73e7cc3b4198 node DatanodeRegistration(127.0.0.1:44115, datanodeUuid=7f3e0bea-514b-4e1b-8723-3bd65959348a, infoPort=34561, infoSecurePort=0, ipcPort=41129, storageInfo=lv=-57;cid=testClusterID;nsid=1495507183;c=1731671241800), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T11:47:38,316 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:38,332 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T11:47:38,696 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-15T11:47:38,699 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-15T11:47:38,701 ERROR [FSHLog-0-hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c-prefix:7adf9b3d9d04,35267,1731671243773 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36437,DS-edbaecff-ceeb-4755-86c7-e19dc7813ddc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:47:38,701 WARN [FSHLog-0-hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c-prefix:7adf9b3d9d04,35267,1731671243773 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36437,DS-edbaecff-ceeb-4755-86c7-e19dc7813ddc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T11:47:38,702 DEBUG [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7adf9b3d9d04%2C35267%2C1731671243773:(num 1731671244498) roll requested 2024-11-15T11:47:38,702 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7adf9b3d9d04%2C35267%2C1731671243773.1731671258702 2024-11-15T11:47:38,718 DEBUG [regionserver/7adf9b3d9d04:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671244498 newFile=hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671258702 2024-11-15T11:47:38,718 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:38,718 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:38,718 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:38,718 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:38,718 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:38,718 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671244498 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671258702 2024-11-15T11:47:38,719 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36437,DS-edbaecff-ceeb-4755-86c7-e19dc7813ddc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:47:38,719 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36437,DS-edbaecff-ceeb-4755-86c7-e19dc7813ddc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T11:47:38,719 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671244498 2024-11-15T11:47:38,719 WARN [IPC Server handler 4 on default port 37741 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671244498 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1013 2024-11-15T11:47:38,719 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671244498 after 0ms 2024-11-15T11:47:38,722 DEBUG [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42495:42495),(127.0.0.1/127.0.0.1:34561:34561)] 2024-11-15T11:47:38,722 DEBUG [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671244498 is not closed yet, will try archiving it next time 2024-11-15T11:47:39,317 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:39,332 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:40,318 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:40,333 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:40,726 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-15T11:47:40,985 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1013: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-15T11:47:41,319 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:41,334 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:42,320 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:42,334 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:42,720 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671244498 after 4001ms 2024-11-15T11:47:42,731 WARN [ResponseProcessor for block BP-1151629165-172.17.0.2-1731671241800:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1151629165-172.17.0.2-1731671241800:blk_1073741837_1016 java.io.IOException: Bad response ERROR for BP-1151629165-172.17.0.2-1731671241800:blk_1073741837_1016 from datanode DatanodeInfoWithStorage[127.0.0.1:44115,DS-edbaecff-ceeb-4755-86c7-e19dc7813ddc,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:47:42,731 WARN [DataStreamer for file /user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671258702 block BP-1151629165-172.17.0.2-1731671241800:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1151629165-172.17.0.2-1731671241800:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37531,DS-6d72e768-557c-42c6-8747-33329f0812f3,DISK], DatanodeInfoWithStorage[127.0.0.1:44115,DS-edbaecff-ceeb-4755-86c7-e19dc7813ddc,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44115,DS-edbaecff-ceeb-4755-86c7-e19dc7813ddc,DISK]) is bad. 2024-11-15T11:47:42,731 WARN [PacketResponder: BP-1151629165-172.17.0.2-1731671241800:blk_1073741837_1016, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:44115] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] 
at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:47:42,732 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-639172724_22 at /127.0.0.1:49088 [Receiving block BP-1151629165-172.17.0.2-1731671241800:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:37531:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49088 dst: /127.0.0.1:37531 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:47:42,732 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-639172724_22 at /127.0.0.1:44296 [Receiving block BP-1151629165-172.17.0.2-1731671241800:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:44115:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44296 dst: /127.0.0.1:44115 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] 
at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:47:42,768 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@929fb8a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T11:47:42,769 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@39f8899d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T11:47:42,769 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T11:47:42,769 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7b559376{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T11:47:42,769 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5d802677{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/hadoop.log.dir/,STOPPED} 2024-11-15T11:47:42,771 WARN [BP-1151629165-172.17.0.2-1731671241800 heartbeating to localhost/127.0.0.1:37741 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T11:47:42,771 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-15T11:47:42,771 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T11:47:42,771 WARN [BP-1151629165-172.17.0.2-1731671241800 heartbeating to localhost/127.0.0.1:37741 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1151629165-172.17.0.2-1731671241800 (Datanode Uuid 7f3e0bea-514b-4e1b-8723-3bd65959348a) service to localhost/127.0.0.1:37741 2024-11-15T11:47:42,772 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/cluster_a2bb061e-96ca-9eeb-27a3-0e3501365403/data/data1/current/BP-1151629165-172.17.0.2-1731671241800 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T11:47:42,772 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/cluster_a2bb061e-96ca-9eeb-27a3-0e3501365403/data/data2/current/BP-1151629165-172.17.0.2-1731671241800 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T11:47:42,773 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T11:47:42,781 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T11:47:42,785 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T11:47:42,787 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T11:47:42,787 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T11:47:42,787 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T11:47:42,789 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a79eeb5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/hadoop.log.dir/,AVAILABLE} 2024-11-15T11:47:42,790 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6d237fac{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T11:47:42,894 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@48919a97{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/java.io.tmpdir/jetty-localhost-36309-hadoop-hdfs-3_4_1-tests_jar-_-any-6800765816154143161/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T11:47:42,894 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4778d192{HTTP/1.1, 
(http/1.1)}{localhost:36309} 2024-11-15T11:47:42,894 INFO [Time-limited test {}] server.Server(415): Started @178648ms 2024-11-15T11:47:42,896 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T11:47:42,931 WARN [ResponseProcessor for block BP-1151629165-172.17.0.2-1731671241800:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1151629165-172.17.0.2-1731671241800:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:47:42,932 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-639172724_22 at /127.0.0.1:38110 [Receiving block BP-1151629165-172.17.0.2-1731671241800:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:37531:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38110 dst: /127.0.0.1:37531 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T11:47:42,933 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@570cb725{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T11:47:42,934 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@35808fda{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T11:47:42,934 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T11:47:42,934 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@22d0d1b0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T11:47:42,934 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6299f50b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/hadoop.log.dir/,STOPPED} 2024-11-15T11:47:42,937 WARN [BP-1151629165-172.17.0.2-1731671241800 heartbeating to localhost/127.0.0.1:37741 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T11:47:42,937 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-15T11:47:42,937 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T11:47:42,937 WARN [BP-1151629165-172.17.0.2-1731671241800 heartbeating to localhost/127.0.0.1:37741 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1151629165-172.17.0.2-1731671241800 (Datanode Uuid 258b2754-7809-4dff-9d5a-1909f1bd1595) service to localhost/127.0.0.1:37741 2024-11-15T11:47:42,937 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/cluster_a2bb061e-96ca-9eeb-27a3-0e3501365403/data/data3/current/BP-1151629165-172.17.0.2-1731671241800 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T11:47:42,938 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/cluster_a2bb061e-96ca-9eeb-27a3-0e3501365403/data/data4/current/BP-1151629165-172.17.0.2-1731671241800 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T11:47:42,938 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T11:47:42,952 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T11:47:42,958 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T11:47:42,959 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T11:47:42,959 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T11:47:42,959 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T11:47:42,960 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@160f7ba9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/hadoop.log.dir/,AVAILABLE} 2024-11-15T11:47:42,960 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fc558ce{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T11:47:43,061 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@39e17fd3{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/java.io.tmpdir/jetty-localhost-36415-hadoop-hdfs-3_4_1-tests_jar-_-any-11444058596480657693/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T11:47:43,064 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5b16a6e2{HTTP/1.1, (http/1.1)}{localhost:36415} 2024-11-15T11:47:43,064 INFO [Time-limited test {}] server.Server(415): Started @178818ms 2024-11-15T11:47:43,066 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T11:47:43,320 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:43,335 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:43,519 WARN [Thread-1417 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-15T11:47:43,521 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5f32aae9b901725c with lease ID 0x4044d2563283191d: from storage DS-edbaecff-ceeb-4755-86c7-e19dc7813ddc node DatanodeRegistration(127.0.0.1:43995, datanodeUuid=7f3e0bea-514b-4e1b-8723-3bd65959348a, infoPort=35409, infoSecurePort=0, ipcPort=37779, storageInfo=lv=-57;cid=testClusterID;nsid=1495507183;c=1731671241800), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T11:47:43,521 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5f32aae9b901725c with lease ID 0x4044d2563283191d: from storage DS-3deb292d-a09d-462b-90d9-73e7cc3b4198 node DatanodeRegistration(127.0.0.1:43995, datanodeUuid=7f3e0bea-514b-4e1b-8723-3bd65959348a, infoPort=35409, infoSecurePort=0, ipcPort=37779, storageInfo=lv=-57;cid=testClusterID;nsid=1495507183;c=1731671241800), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T11:47:43,589 WARN [Thread-1437 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-15T11:47:43,592 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2c76340904cf388f with lease ID 0x4044d2563283191e: from storage DS-6d72e768-557c-42c6-8747-33329f0812f3 node DatanodeRegistration(127.0.0.1:46671, datanodeUuid=258b2754-7809-4dff-9d5a-1909f1bd1595, infoPort=36129, infoSecurePort=0, ipcPort=41889, storageInfo=lv=-57;cid=testClusterID;nsid=1495507183;c=1731671241800), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T11:47:43,592 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2c76340904cf388f with lease ID 0x4044d2563283191e: from storage DS-4a97975f-413b-4aa5-b37c-dd95d6a3dfa9 node DatanodeRegistration(127.0.0.1:46671, datanodeUuid=258b2754-7809-4dff-9d5a-1909f1bd1595, infoPort=36129, infoSecurePort=0, ipcPort=41889, storageInfo=lv=-57;cid=testClusterID;nsid=1495507183;c=1731671241800), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-15T11:47:44,097 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-15T11:47:44,100 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-15T11:47:44,103 ERROR [FSHLog-0-hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c-prefix:7adf9b3d9d04,35267,1731671243773 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37531,DS-6d72e768-557c-42c6-8747-33329f0812f3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T11:47:44,103 WARN [FSHLog-0-hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c-prefix:7adf9b3d9d04,35267,1731671243773 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37531,DS-6d72e768-557c-42c6-8747-33329f0812f3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:47:44,103 DEBUG [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7adf9b3d9d04%2C35267%2C1731671243773:(num 1731671258702) roll requested 2024-11-15T11:47:44,104 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7adf9b3d9d04%2C35267%2C1731671243773.1731671264103 2024-11-15T11:47:44,110 DEBUG [regionserver/7adf9b3d9d04:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671258702 newFile=hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671264103 2024-11-15T11:47:44,110 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:44,110 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:44,110 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:44,110 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:44,111 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:44,111 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671258702 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671264103 2024-11-15T11:47:44,111 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37531,DS-6d72e768-557c-42c6-8747-33329f0812f3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T11:47:44,111 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37531,DS-6d72e768-557c-42c6-8747-33329f0812f3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:47:44,111 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671258702 2024-11-15T11:47:44,112 WARN [IPC Server handler 1 on default port 37741 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671258702 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-15T11:47:44,112 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671258702 after 1ms 2024-11-15T11:47:44,112 DEBUG [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35409:35409),(127.0.0.1/127.0.0.1:36129:36129)] 2024-11-15T11:47:44,112 DEBUG [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671258702 is not closed yet, will try archiving it next time 2024-11-15T11:47:44,321 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:44,336 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T11:47:44,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46671 is added to blk_1073741837_1020 (size=2427) 2024-11-15T11:47:45,322 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:45,337 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:46,114 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7adf9b3d9d04%2C35267%2C1731671243773.1731671266113 2024-11-15T11:47:46,136 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671264103 newFile=hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671266113 2024-11-15T11:47:46,136 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:46,136 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:46,136 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:46,137 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:46,137 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:46,137 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671264103 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671266113 2024-11-15T11:47:46,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46671 is added to blk_1073741838_1019 (size=1264) 2024-11-15T11:47:46,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43995 is added to blk_1073741838_1019 (size=1264) 2024-11-15T11:47:46,140 DEBUG 
[Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671258702 is not closed yet, will try archiving it next time 2024-11-15T11:47:46,147 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35409:35409),(127.0.0.1/127.0.0.1:36129:36129)] 2024-11-15T11:47:46,147 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671258702 is not closed yet, will try archiving it next time 2024-11-15T11:47:46,147 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671244498 2024-11-15T11:47:46,147 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671244498 2024-11-15T11:47:46,148 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671244498 after 1ms 2024-11-15T11:47:46,148 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671244498 2024-11-15T11:47:46,158 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1731671245634/Put/vlen=218/seqid=0] 2024-11-15T11:47:46,158 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1731671255353/Put/vlen=1045/seqid=0] 2024-11-15T11:47:46,158 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671244498 2024-11-15T11:47:46,158 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671258702 2024-11-15T11:47:46,158 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671258702 2024-11-15T11:47:46,159 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671258702 after 1ms 2024-11-15T11:47:46,159 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671258702 2024-11-15T11:47:46,164 DEBUG [Time-limited test {}] 
wal.TestLogRolling(412): #5: [row1003/info:/1731671258701/Put/vlen=1045/seqid=0] 2024-11-15T11:47:46,165 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1731671260728/Put/vlen=1045/seqid=0] 2024-11-15T11:47:46,165 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671258702 2024-11-15T11:47:46,165 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671264103 2024-11-15T11:47:46,165 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671264103 2024-11-15T11:47:46,165 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671264103 after 0ms 2024-11-15T11:47:46,165 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671264103 2024-11-15T11:47:46,172 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1731671264102/Put/vlen=1045/seqid=0] 2024-11-15T11:47:46,172 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671266113 2024-11-15T11:47:46,172 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671266113 2024-11-15T11:47:46,173 WARN [IPC Server handler 0 on default port 37741 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671266113 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-15T11:47:46,173 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671266113 after 1ms 2024-11-15T11:47:46,323 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:46,337 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:46,522 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-15T11:47:46,595 WARN [ResponseProcessor for block BP-1151629165-172.17.0.2-1731671241800:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1151629165-172.17.0.2-1731671241800:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:47:46,595 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2013189632_22 at /127.0.0.1:41122 [Receiving block BP-1151629165-172.17.0.2-1731671241800:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:43995:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41122 dst: /127.0.0.1:43995 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:43995 remote=/127.0.0.1:41122]. Total timeout mills is 60000, 59540 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:47:46,595 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2013189632_22 at /127.0.0.1:39894 [Receiving block BP-1151629165-172.17.0.2-1731671241800:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:46671:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39894 dst: /127.0.0.1:46671 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
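Note on the lease-recovery entries above: RecoverLeaseFSUtils reports "Recovered lease, attempt=0 ... after 1ms" when the namenode releases the lease immediately, and otherwise retries until DistributedFileSystem.isFileClosed() (the call visible in the stack traces) returns true. A minimal standalone sketch of that same sequence against a bare HDFS client follows; the namenode URI and WAL path are placeholders taken loosely from the log, and the 1-second backoff is only an assumption, not the exact interval the utility uses.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical namenode and WAL path; substitute the real values from the test cluster.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:37741"), conf);
    Path wal = new Path("/user/jenkins/test-data/example/WALs/example-wal");
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    // recoverLease() returns true if the lease is released right away (the attempt=0 case);
    // otherwise the namenode starts block recovery and we poll isFileClosed() until it finishes.
    boolean closed = dfs.recoverLease(wal);
    while (!closed) {
      Thread.sleep(1000L);            // assumed backoff between polls
      closed = dfs.isFileClosed(wal); // same call that appears in the stack traces above
    }
    System.out.println("lease recovered, file closed: " + wal);
  }
}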
2024-11-15T11:47:46,595 WARN [DataStreamer for file /user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671266113 block BP-1151629165-172.17.0.2-1731671241800:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1151629165-172.17.0.2-1731671241800:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43995,DS-edbaecff-ceeb-4755-86c7-e19dc7813ddc,DISK], DatanodeInfoWithStorage[127.0.0.1:46671,DS-6d72e768-557c-42c6-8747-33329f0812f3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43995,DS-edbaecff-ceeb-4755-86c7-e19dc7813ddc,DISK]) is bad. 2024-11-15T11:47:46,596 WARN [DataStreamer for file /user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671266113 block BP-1151629165-172.17.0.2-1731671241800:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1151629165-172.17.0.2-1731671241800:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:47:46,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43995 is added to blk_1073741839_1022 (size=85) 2024-11-15T11:47:47,329 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:47,338 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:48,113 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671258702 after 4002ms 2024-11-15T11:47:48,330 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:48,338 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T11:47:49,331 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:49,339 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:50,174 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671266113 after 4002ms 2024-11-15T11:47:50,174 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671266113 2024-11-15T11:47:50,178 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671266113 2024-11-15T11:47:50,178 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-15T11:47:50,179 ERROR [FSHLog-0-hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c-prefix:7adf9b3d9d04,35267,1731671243773.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36437,DS-edbaecff-ceeb-4755-86c7-e19dc7813ddc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:47:50,179 WARN [FSHLog-0-hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c-prefix:7adf9b3d9d04,35267,1731671243773.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36437,DS-edbaecff-ceeb-4755-86c7-e19dc7813ddc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:47:50,179 DEBUG [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7adf9b3d9d04%2C35267%2C1731671243773.meta:.meta(num 1731671244996) roll requested 2024-11-15T11:47:50,179 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7adf9b3d9d04%2C35267%2C1731671243773.meta.1731671270179.meta 2024-11-15T11:47:50,185 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:50,185 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:50,185 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:50,185 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:50,185 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:50,185 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.meta.1731671244996.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.meta.1731671270179.meta 2024-11-15T11:47:50,195 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36437,DS-edbaecff-ceeb-4755-86c7-e19dc7813ddc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:47:50,195 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36437,DS-edbaecff-ceeb-4755-86c7-e19dc7813ddc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
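The "roll requested" / "Rolled WAL" lines above are produced internally by AbstractWALRoller after the append failure. For reference, the equivalent request can also be made from a client; a hedged sketch using the public Admin API is below, where the "host,port,startcode" server name is a placeholder modeled on the directory names in the log (e.g. 7adf9b3d9d04,35267,1731671243773), not something this test actually issues.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RollWalSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Placeholder server name in the same "host,port,startcode" form the WAL paths use.
      ServerName rs = ServerName.valueOf("7adf9b3d9d04,35267,1731671243773");
      // Asks the region server to close its current WAL writer and open a new one,
      // the client-side counterpart of the roll the log roller performs above.
      admin.rollWALWriter(rs);
    }
  }
}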
2024-11-15T11:47:50,195 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.meta.1731671244996.meta 2024-11-15T11:47:50,195 DEBUG [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36129:36129),(127.0.0.1/127.0.0.1:35409:35409)] 2024-11-15T11:47:50,195 DEBUG [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.meta.1731671244996.meta is not closed yet, will try archiving it next time 2024-11-15T11:47:50,195 WARN [IPC Server handler 3 on default port 37741 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.meta.1731671244996.meta has not been closed. Lease recovery is in progress. RecoveryId = 1024 for block blk_1073741834_1014 2024-11-15T11:47:50,196 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.meta.1731671244996.meta after 1ms 2024-11-15T11:47:50,211 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/data/hbase/meta/1588230740/.tmp/info/3e13de1018d84325bf5493ea02975951 is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1731671245245.d51577c3ee67012e0e09c8becd49aa32./info:regioninfo/1731671245638/Put/seqid=0 2024-11-15T11:47:50,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46671 is added to blk_1073741841_1025 (size=7125) 2024-11-15T11:47:50,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43995 is added to blk_1073741841_1025 (size=7125) 2024-11-15T11:47:50,229 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/data/hbase/meta/1588230740/.tmp/info/3e13de1018d84325bf5493ea02975951 2024-11-15T11:47:50,257 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/data/hbase/meta/1588230740/.tmp/ns/c90589006b2e45fcae06a49ede85c7de is 43, key is default/ns:d/1731671245161/Put/seqid=0 2024-11-15T11:47:50,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46671 is added to blk_1073741842_1026 (size=5153) 2024-11-15T11:47:50,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43995 is added to blk_1073741842_1026 (size=5153) 2024-11-15T11:47:50,263 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/data/hbase/meta/1588230740/.tmp/ns/c90589006b2e45fcae06a49ede85c7de 2024-11-15T11:47:50,288 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/data/hbase/meta/1588230740/.tmp/table/3a14d61906d24a3f9f06e598c73aef1d is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1731671245648/Put/seqid=0 2024-11-15T11:47:50,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46671 is added to blk_1073741843_1027 (size=5438) 2024-11-15T11:47:50,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43995 is added to blk_1073741843_1027 (size=5438) 2024-11-15T11:47:50,294 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/data/hbase/meta/1588230740/.tmp/table/3a14d61906d24a3f9f06e598c73aef1d 2024-11-15T11:47:50,301 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/data/hbase/meta/1588230740/.tmp/info/3e13de1018d84325bf5493ea02975951 as hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/data/hbase/meta/1588230740/info/3e13de1018d84325bf5493ea02975951 2024-11-15T11:47:50,308 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/data/hbase/meta/1588230740/info/3e13de1018d84325bf5493ea02975951, entries=10, sequenceid=11, filesize=7.0 K 2024-11-15T11:47:50,309 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/data/hbase/meta/1588230740/.tmp/ns/c90589006b2e45fcae06a49ede85c7de as hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/data/hbase/meta/1588230740/ns/c90589006b2e45fcae06a49ede85c7de 2024-11-15T11:47:50,317 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/data/hbase/meta/1588230740/ns/c90589006b2e45fcae06a49ede85c7de, entries=2, sequenceid=11, filesize=5.0 K 2024-11-15T11:47:50,318 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/data/hbase/meta/1588230740/.tmp/table/3a14d61906d24a3f9f06e598c73aef1d as hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/data/hbase/meta/1588230740/table/3a14d61906d24a3f9f06e598c73aef1d 2024-11-15T11:47:50,325 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/data/hbase/meta/1588230740/table/3a14d61906d24a3f9f06e598c73aef1d, entries=2, sequenceid=11, filesize=5.3 K 2024-11-15T11:47:50,327 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 148ms, sequenceid=11, compaction requested=false 2024-11-15T11:47:50,327 DEBUG [Time-limited test {}] 
regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-15T11:47:50,327 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing d51577c3ee67012e0e09c8becd49aa32 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-15T11:47:50,327 ERROR [FSHLog-0-hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c-prefix:7adf9b3d9d04,35267,1731671243773 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1151629165-172.17.0.2-1731671241800:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:47:50,328 WARN [FSHLog-0-hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c-prefix:7adf9b3d9d04,35267,1731671243773 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1151629165-172.17.0.2-1731671241800:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
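The "All datanodes ... are bad. Aborting..." and "UNDER_RECOVERY but not UNDER_CONSTRUCTION" failures above are pipeline-recovery errors after the test restarts datanodes; with only two datanodes there is rarely a spare node to swap into the write pipeline. The sketch below shows the HDFS client settings that govern this behaviour. The keys are real client configuration properties, but the values are only what a small test cluster might plausibly use, an assumption rather than what this test sets.

import org.apache.hadoop.conf.Configuration;

public class PipelineRecoverySettingsSketch {
  public static Configuration smallClusterClientConf() {
    Configuration conf = new Configuration();
    // With a two-node pipeline the default replace-datanode-on-failure handling can
    // abort the stream once a replacement datanode cannot be found.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
    // Best-effort keeps writing with the surviving datanodes instead of failing the stream.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
    return conf;
  }
}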
2024-11-15T11:47:50,329 DEBUG [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7adf9b3d9d04%2C35267%2C1731671243773:(num 1731671266113) roll requested 2024-11-15T11:47:50,329 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7adf9b3d9d04%2C35267%2C1731671243773.1731671270329 2024-11-15T11:47:50,332 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T11:47:50,340 DEBUG [regionserver/7adf9b3d9d04:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671266113 newFile=hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671270329 2024-11-15T11:47:50,340 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:50,339 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T11:47:50,340 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:50,340 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:50,340 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:50,340 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:50,340 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671266113 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671270329 2024-11-15T11:47:50,340 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1151629165-172.17.0.2-1731671241800:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:47:50,341 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1151629165-172.17.0.2-1731671241800:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:47:50,341 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671266113 2024-11-15T11:47:50,342 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671266113 after 1ms 2024-11-15T11:47:50,342 DEBUG [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35409:35409),(127.0.0.1/127.0.0.1:36129:36129)] 2024-11-15T11:47:50,347 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.1731671266113 to hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/oldWALs/7adf9b3d9d04%2C35267%2C1731671243773.1731671266113 2024-11-15T11:47:50,364 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/data/default/TestLogRolling-testLogRollOnPipelineRestart/d51577c3ee67012e0e09c8becd49aa32/.tmp/info/9674c921b4c2422f87ff26a71783bd34 is 1080, key is row1002/info:/1731671255353/Put/seqid=0 2024-11-15T11:47:50,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46671 is added to blk_1073741845_1029 (size=9270) 2024-11-15T11:47:50,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43995 is added to blk_1073741845_1029 (size=9270) 2024-11-15T11:47:50,374 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/data/default/TestLogRolling-testLogRollOnPipelineRestart/d51577c3ee67012e0e09c8becd49aa32/.tmp/info/9674c921b4c2422f87ff26a71783bd34 2024-11-15T11:47:50,381 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/data/default/TestLogRolling-testLogRollOnPipelineRestart/d51577c3ee67012e0e09c8becd49aa32/.tmp/info/9674c921b4c2422f87ff26a71783bd34 as hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/data/default/TestLogRolling-testLogRollOnPipelineRestart/d51577c3ee67012e0e09c8becd49aa32/info/9674c921b4c2422f87ff26a71783bd34 2024-11-15T11:47:50,388 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/data/default/TestLogRolling-testLogRollOnPipelineRestart/d51577c3ee67012e0e09c8becd49aa32/info/9674c921b4c2422f87ff26a71783bd34, entries=4, sequenceid=8, filesize=9.1 K 2024-11-15T11:47:50,390 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 
KB/4848, currentSize=0 B/0 for d51577c3ee67012e0e09c8becd49aa32 in 62ms, sequenceid=8, compaction requested=false 2024-11-15T11:47:50,390 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for d51577c3ee67012e0e09c8becd49aa32: 2024-11-15T11:47:50,396 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-15T11:47:50,396 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-15T11:47:50,397 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T11:47:50,397 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:47:50,397 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:47:50,397 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T11:47:50,397 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-15T11:47:50,397 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=833652567, stopped=false 2024-11-15T11:47:50,397 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=7adf9b3d9d04,45917,1731671243600 2024-11-15T11:47:50,447 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35267-0x1013f9bce050001, quorum=127.0.0.1:53987, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T11:47:50,447 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45917-0x1013f9bce050000, quorum=127.0.0.1:53987, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T11:47:50,447 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35267-0x1013f9bce050001, quorum=127.0.0.1:53987, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:47:50,447 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45917-0x1013f9bce050000, quorum=127.0.0.1:53987, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:47:50,447 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T11:47:50,448 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-15T11:47:50,448 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T11:47:50,448 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:47:50,448 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:35267-0x1013f9bce050001, quorum=127.0.0.1:53987, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T11:47:50,448 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45917-0x1013f9bce050000, quorum=127.0.0.1:53987, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T11:47:50,448 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '7adf9b3d9d04,35267,1731671243773' ***** 2024-11-15T11:47:50,448 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-15T11:47:50,449 INFO [RS:0;7adf9b3d9d04:35267 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-15T11:47:50,449 INFO [RS:0;7adf9b3d9d04:35267 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-15T11:47:50,449 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-15T11:47:50,449 INFO [RS:0;7adf9b3d9d04:35267 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-15T11:47:50,449 INFO [RS:0;7adf9b3d9d04:35267 {}] regionserver.HRegionServer(3091): Received CLOSE for d51577c3ee67012e0e09c8becd49aa32 2024-11-15T11:47:50,449 INFO [RS:0;7adf9b3d9d04:35267 {}] regionserver.HRegionServer(959): stopping server 7adf9b3d9d04,35267,1731671243773 2024-11-15T11:47:50,449 INFO [RS:0;7adf9b3d9d04:35267 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T11:47:50,449 INFO [RS:0;7adf9b3d9d04:35267 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;7adf9b3d9d04:35267. 2024-11-15T11:47:50,449 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing d51577c3ee67012e0e09c8becd49aa32, disabling compactions & flushes 2024-11-15T11:47:50,449 DEBUG [RS:0;7adf9b3d9d04:35267 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T11:47:50,449 DEBUG [RS:0;7adf9b3d9d04:35267 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:47:50,449 INFO [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731671245245.d51577c3ee67012e0e09c8becd49aa32. 2024-11-15T11:47:50,449 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731671245245.d51577c3ee67012e0e09c8becd49aa32. 2024-11-15T11:47:50,449 INFO [RS:0;7adf9b3d9d04:35267 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-15T11:47:50,449 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731671245245.d51577c3ee67012e0e09c8becd49aa32. after waiting 0 ms 2024-11-15T11:47:50,449 INFO [RS:0;7adf9b3d9d04:35267 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-15T11:47:50,449 INFO [RS:0;7adf9b3d9d04:35267 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-15T11:47:50,449 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731671245245.d51577c3ee67012e0e09c8becd49aa32. 2024-11-15T11:47:50,450 INFO [RS:0;7adf9b3d9d04:35267 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-15T11:47:50,450 INFO [RS:0;7adf9b3d9d04:35267 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-15T11:47:50,450 DEBUG [RS:0;7adf9b3d9d04:35267 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, d51577c3ee67012e0e09c8becd49aa32=TestLogRolling-testLogRollOnPipelineRestart,,1731671245245.d51577c3ee67012e0e09c8becd49aa32.} 2024-11-15T11:47:50,450 DEBUG [RS:0;7adf9b3d9d04:35267 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, d51577c3ee67012e0e09c8becd49aa32 2024-11-15T11:47:50,450 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T11:47:50,450 INFO [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T11:47:50,450 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T11:47:50,450 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T11:47:50,450 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T11:47:50,454 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/data/default/TestLogRolling-testLogRollOnPipelineRestart/d51577c3ee67012e0e09c8becd49aa32/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-15T11:47:50,454 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-15T11:47:50,455 INFO [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731671245245.d51577c3ee67012e0e09c8becd49aa32. 2024-11-15T11:47:50,455 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T11:47:50,455 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for d51577c3ee67012e0e09c8becd49aa32: Waiting for close lock at 1731671270449Running coprocessor pre-close hooks at 1731671270449Disabling compacts and flushes for region at 1731671270449Disabling writes for close at 1731671270449Writing region close event to WAL at 1731671270450 (+1 ms)Running coprocessor post-close hooks at 1731671270455 (+5 ms)Closed at 1731671270455 2024-11-15T11:47:50,455 INFO [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T11:47:50,455 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731671270450Running coprocessor pre-close hooks at 1731671270450Disabling compacts and flushes for region at 1731671270450Disabling writes for close at 1731671270450Writing region close event to WAL at 1731671270451 (+1 ms)Running coprocessor post-close hooks at 1731671270455 (+4 ms)Closed at 1731671270455 2024-11-15T11:47:50,455 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731671245245.d51577c3ee67012e0e09c8becd49aa32. 2024-11-15T11:47:50,455 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-15T11:47:50,650 INFO [RS:0;7adf9b3d9d04:35267 {}] regionserver.HRegionServer(976): stopping server 7adf9b3d9d04,35267,1731671243773; all regions closed. 
2024-11-15T11:47:50,651 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:50,651 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:50,651 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:50,651 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:50,651 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:50,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43995 is added to blk_1073741840_1023 (size=825) 2024-11-15T11:47:50,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46671 is added to blk_1073741840_1023 (size=825) 2024-11-15T11:47:51,016 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T11:47:51,017 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-15T11:47:51,017 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-15T11:47:51,332 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:51,340 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:51,347 INFO [regionserver/7adf9b3d9d04:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-15T11:47:51,348 INFO [regionserver/7adf9b3d9d04:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-15T11:47:52,333 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:52,340 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:52,352 INFO [regionserver/7adf9b3d9d04:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T11:47:52,592 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1014: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-15T11:47:53,334 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:53,341 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:53,581 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-15T11:47:54,196 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.meta.1731671244996.meta after 4001ms 2024-11-15T11:47:54,197 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/WALs/7adf9b3d9d04,35267,1731671243773/7adf9b3d9d04%2C35267%2C1731671243773.meta.1731671244996.meta to hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/oldWALs/7adf9b3d9d04%2C35267%2C1731671243773.meta.1731671244996.meta 2024-11-15T11:47:54,200 DEBUG [RS:0;7adf9b3d9d04:35267 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/oldWALs 2024-11-15T11:47:54,200 INFO [RS:0;7adf9b3d9d04:35267 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7adf9b3d9d04%2C35267%2C1731671243773.meta:.meta(num 1731671270179) 2024-11-15T11:47:54,200 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:54,201 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:54,201 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:54,201 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:54,201 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:54,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46671 is added to blk_1073741844_1028 (size=1162) 2024-11-15T11:47:54,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43995 is added to blk_1073741844_1028 (size=1162) 2024-11-15T11:47:54,208 DEBUG [RS:0;7adf9b3d9d04:35267 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/oldWALs 2024-11-15T11:47:54,208 INFO [RS:0;7adf9b3d9d04:35267 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7adf9b3d9d04%2C35267%2C1731671243773:(num 1731671270329) 2024-11-15T11:47:54,208 DEBUG [RS:0;7adf9b3d9d04:35267 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:47:54,208 INFO [RS:0;7adf9b3d9d04:35267 {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T11:47:54,208 INFO [RS:0;7adf9b3d9d04:35267 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T11:47:54,208 INFO [RS:0;7adf9b3d9d04:35267 {}] hbase.ChoreService(370): Chore service for: regionserver/7adf9b3d9d04:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-15T11:47:54,209 INFO [RS:0;7adf9b3d9d04:35267 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T11:47:54,209 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-15T11:47:54,209 INFO [RS:0;7adf9b3d9d04:35267 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35267 2024-11-15T11:47:54,271 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45917-0x1013f9bce050000, quorum=127.0.0.1:53987, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T11:47:54,271 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35267-0x1013f9bce050001, quorum=127.0.0.1:53987, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7adf9b3d9d04,35267,1731671243773 2024-11-15T11:47:54,271 INFO [RS:0;7adf9b3d9d04:35267 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T11:47:54,272 ERROR [Time-limited test-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$363/0x00007f2790902658@7e9b14c7 rejected from java.util.concurrent.ThreadPoolExecutor@3b998570[Terminated, pool size = 0, active threads = 0, queued tasks = 0, completed tasks = 14] at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1365) ~[?:?] at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?] at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4] 2024-11-15T11:47:54,280 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7adf9b3d9d04,35267,1731671243773] 2024-11-15T11:47:54,288 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/7adf9b3d9d04,35267,1731671243773 already deleted, retry=false 2024-11-15T11:47:54,288 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 7adf9b3d9d04,35267,1731671243773 expired; onlineServers=0 2024-11-15T11:47:54,288 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '7adf9b3d9d04,45917,1731671243600' ***** 2024-11-15T11:47:54,288 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-15T11:47:54,288 INFO [M:0;7adf9b3d9d04:45917 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T11:47:54,288 INFO [M:0;7adf9b3d9d04:45917 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T11:47:54,289 DEBUG [M:0;7adf9b3d9d04:45917 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-15T11:47:54,289 DEBUG [M:0;7adf9b3d9d04:45917 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-15T11:47:54,289 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-15T11:47:54,289 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster-HFileCleaner.large.0-1731671244287 {}] cleaner.HFileCleaner(306): Exit Thread[master/7adf9b3d9d04:0:becomeActiveMaster-HFileCleaner.large.0-1731671244287,5,FailOnTimeoutGroup] 2024-11-15T11:47:54,289 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster-HFileCleaner.small.0-1731671244297 {}] cleaner.HFileCleaner(306): Exit Thread[master/7adf9b3d9d04:0:becomeActiveMaster-HFileCleaner.small.0-1731671244297,5,FailOnTimeoutGroup] 2024-11-15T11:47:54,289 INFO [M:0;7adf9b3d9d04:45917 {}] hbase.ChoreService(370): Chore service for: master/7adf9b3d9d04:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-15T11:47:54,289 INFO [M:0;7adf9b3d9d04:45917 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T11:47:54,289 DEBUG [M:0;7adf9b3d9d04:45917 {}] master.HMaster(1795): Stopping service threads 2024-11-15T11:47:54,289 INFO [M:0;7adf9b3d9d04:45917 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-15T11:47:54,289 INFO [M:0;7adf9b3d9d04:45917 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T11:47:54,289 INFO [M:0;7adf9b3d9d04:45917 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-15T11:47:54,290 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-15T11:47:54,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45917-0x1013f9bce050000, quorum=127.0.0.1:53987, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-15T11:47:54,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45917-0x1013f9bce050000, quorum=127.0.0.1:53987, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:47:54,296 DEBUG [M:0;7adf9b3d9d04:45917 {}] zookeeper.ZKUtil(347): master:45917-0x1013f9bce050000, quorum=127.0.0.1:53987, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-15T11:47:54,297 WARN [M:0;7adf9b3d9d04:45917 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-15T11:47:54,297 INFO [M:0;7adf9b3d9d04:45917 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/.lastflushedseqids 2024-11-15T11:47:54,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43995 is added to blk_1073741846_1030 (size=111) 2024-11-15T11:47:54,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46671 is added to blk_1073741846_1030 (size=111) 2024-11-15T11:47:54,305 INFO [M:0;7adf9b3d9d04:45917 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-15T11:47:54,305 INFO [M:0;7adf9b3d9d04:45917 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-15T11:47:54,305 DEBUG [M:0;7adf9b3d9d04:45917 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T11:47:54,306 INFO [M:0;7adf9b3d9d04:45917 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:47:54,306 DEBUG [M:0;7adf9b3d9d04:45917 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:47:54,306 DEBUG [M:0;7adf9b3d9d04:45917 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T11:47:54,306 DEBUG [M:0;7adf9b3d9d04:45917 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:47:54,306 INFO [M:0;7adf9b3d9d04:45917 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.17 KB heapSize=29.16 KB 2024-11-15T11:47:54,306 ERROR [FSHLog-0-hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/MasterData-prefix:7adf9b3d9d04,45917,1731671243600 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36437,DS-edbaecff-ceeb-4755-86c7-e19dc7813ddc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:47:54,306 WARN [FSHLog-0-hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/MasterData-prefix:7adf9b3d9d04,45917,1731671243600 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36437,DS-edbaecff-ceeb-4755-86c7-e19dc7813ddc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T11:47:54,306 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 7adf9b3d9d04%2C45917%2C1731671243600:(num 1731671243971) roll requested 2024-11-15T11:47:54,307 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7adf9b3d9d04%2C45917%2C1731671243600.1731671274307 2024-11-15T11:47:54,311 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:54,311 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:54,311 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:54,311 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:54,312 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:54,312 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/MasterData/WALs/7adf9b3d9d04,45917,1731671243600/7adf9b3d9d04%2C45917%2C1731671243600.1731671243971 with entries=53, filesize=26.62 KB; new WAL /user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/MasterData/WALs/7adf9b3d9d04,45917,1731671243600/7adf9b3d9d04%2C45917%2C1731671243600.1731671274307 2024-11-15T11:47:54,312 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36437,DS-edbaecff-ceeb-4755-86c7-e19dc7813ddc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T11:47:54,312 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36437,DS-edbaecff-ceeb-4755-86c7-e19dc7813ddc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T11:47:54,312 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/MasterData/WALs/7adf9b3d9d04,45917,1731671243600/7adf9b3d9d04%2C45917%2C1731671243600.1731671243971 2024-11-15T11:47:54,312 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35409:35409),(127.0.0.1/127.0.0.1:36129:36129)] 2024-11-15T11:47:54,312 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/MasterData/WALs/7adf9b3d9d04,45917,1731671243600/7adf9b3d9d04%2C45917%2C1731671243600.1731671243971 is not closed yet, will try archiving it next time 2024-11-15T11:47:54,313 WARN [IPC Server handler 1 on default port 37741 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/MasterData/WALs/7adf9b3d9d04,45917,1731671243600/7adf9b3d9d04%2C45917%2C1731671243600.1731671243971 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1015 2024-11-15T11:47:54,313 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/MasterData/WALs/7adf9b3d9d04,45917,1731671243600/7adf9b3d9d04%2C45917%2C1731671243600.1731671243971 after 1ms 2024-11-15T11:47:54,329 DEBUG [M:0;7adf9b3d9d04:45917 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7c7fcbbba66b481eba325eea16366c63 is 82, key is hbase:meta,,1/info:regioninfo/1731671245068/Put/seqid=0 2024-11-15T11:47:54,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46671 is added to blk_1073741848_1033 (size=5672) 2024-11-15T11:47:54,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43995 is added to blk_1073741848_1033 (size=5672) 2024-11-15T11:47:54,334 INFO [M:0;7adf9b3d9d04:45917 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7c7fcbbba66b481eba325eea16366c63 2024-11-15T11:47:54,334 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:54,342 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:54,355 DEBUG [M:0;7adf9b3d9d04:45917 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/eb7a587b0c3a4695a3a0ef97c3581d4c is 778, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731671245653/Put/seqid=0 2024-11-15T11:47:54,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43995 is added to blk_1073741849_1034 (size=6118) 2024-11-15T11:47:54,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46671 is added to blk_1073741849_1034 (size=6118) 2024-11-15T11:47:54,361 INFO [M:0;7adf9b3d9d04:45917 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.57 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/eb7a587b0c3a4695a3a0ef97c3581d4c 2024-11-15T11:47:54,379 DEBUG [M:0;7adf9b3d9d04:45917 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c8d601342a18434ba6924d0380e3fee0 is 69, key is 7adf9b3d9d04,35267,1731671243773/rs:state/1731671244301/Put/seqid=0 2024-11-15T11:47:54,380 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35267-0x1013f9bce050001, quorum=127.0.0.1:53987, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T11:47:54,380 INFO [RS:0;7adf9b3d9d04:35267 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T11:47:54,380 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35267-0x1013f9bce050001, quorum=127.0.0.1:53987, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T11:47:54,380 INFO [RS:0;7adf9b3d9d04:35267 {}] regionserver.HRegionServer(1031): Exiting; stopping=7adf9b3d9d04,35267,1731671243773; zookeeper connection closed. 
2024-11-15T11:47:54,380 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@66ecbee7 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@66ecbee7 2024-11-15T11:47:54,380 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-15T11:47:54,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46671 is added to blk_1073741850_1035 (size=5156) 2024-11-15T11:47:54,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43995 is added to blk_1073741850_1035 (size=5156) 2024-11-15T11:47:54,388 INFO [M:0;7adf9b3d9d04:45917 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c8d601342a18434ba6924d0380e3fee0 2024-11-15T11:47:54,409 DEBUG [M:0;7adf9b3d9d04:45917 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f47c540949a54b2f9f02b3563c6c0f5e is 52, key is load_balancer_on/state:d/1731671245238/Put/seqid=0 2024-11-15T11:47:54,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46671 is added to blk_1073741851_1036 (size=5056) 2024-11-15T11:47:54,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43995 is added to blk_1073741851_1036 (size=5056) 2024-11-15T11:47:54,416 INFO [M:0;7adf9b3d9d04:45917 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f47c540949a54b2f9f02b3563c6c0f5e 2024-11-15T11:47:54,421 DEBUG [M:0;7adf9b3d9d04:45917 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7c7fcbbba66b481eba325eea16366c63 as hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7c7fcbbba66b481eba325eea16366c63 2024-11-15T11:47:54,428 INFO [M:0;7adf9b3d9d04:45917 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7c7fcbbba66b481eba325eea16366c63, entries=8, sequenceid=56, filesize=5.5 K 2024-11-15T11:47:54,429 DEBUG [M:0;7adf9b3d9d04:45917 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/eb7a587b0c3a4695a3a0ef97c3581d4c as hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/eb7a587b0c3a4695a3a0ef97c3581d4c 2024-11-15T11:47:54,435 INFO [M:0;7adf9b3d9d04:45917 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/eb7a587b0c3a4695a3a0ef97c3581d4c, entries=6, sequenceid=56, filesize=6.0 K 2024-11-15T11:47:54,436 DEBUG [M:0;7adf9b3d9d04:45917 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c8d601342a18434ba6924d0380e3fee0 as hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c8d601342a18434ba6924d0380e3fee0 2024-11-15T11:47:54,442 INFO [M:0;7adf9b3d9d04:45917 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c8d601342a18434ba6924d0380e3fee0, entries=1, sequenceid=56, filesize=5.0 K 2024-11-15T11:47:54,443 DEBUG [M:0;7adf9b3d9d04:45917 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f47c540949a54b2f9f02b3563c6c0f5e as hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/f47c540949a54b2f9f02b3563c6c0f5e 2024-11-15T11:47:54,449 INFO [M:0;7adf9b3d9d04:45917 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/f47c540949a54b2f9f02b3563c6c0f5e, entries=1, sequenceid=56, filesize=4.9 K 2024-11-15T11:47:54,450 INFO [M:0;7adf9b3d9d04:45917 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 144ms, sequenceid=56, compaction requested=false 2024-11-15T11:47:54,463 INFO [M:0;7adf9b3d9d04:45917 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:47:54,464 DEBUG [M:0;7adf9b3d9d04:45917 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731671274305Disabling compacts and flushes for region at 1731671274305Disabling writes for close at 1731671274306 (+1 ms)Obtaining lock to block concurrent updates at 1731671274306Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731671274306Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23726, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1731671274306Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731671274313 (+7 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731671274313Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731671274328 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731671274328Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731671274339 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731671274354 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731671274354Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731671274365 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731671274379 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731671274379Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731671274393 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731671274409 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731671274409Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@56a13d31: reopening flushed file at 1731671274420 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@479a93b3: reopening flushed file at 1731671274428 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2a7a5738: reopening flushed file at 1731671274435 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@292095cf: reopening flushed file at 1731671274442 (+7 ms)Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 144ms, sequenceid=56, compaction requested=false at 1731671274450 (+8 ms)Writing region close event to WAL at 1731671274463 (+13 ms)Closed at 1731671274463 2024-11-15T11:47:54,464 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:54,464 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:54,464 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:54,464 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:54,464 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:47:54,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46671 is added to blk_1073741847_1031 (size=757) 2024-11-15T11:47:54,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43995 is added to blk_1073741847_1031 (size=757) 2024-11-15T11:47:55,335 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:55,342 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:55,455 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:55,455 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:55,473 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:55,473 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:55,473 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:55,473 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:55,474 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:55,474 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:55,478 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:55,478 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:55,478 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:55,480 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:55,486 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:55,486 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:55,592 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1015: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-15T11:47:55,989 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-15T11:47:55,990 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:55,990 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:55,990 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:55,990 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:56,010 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:56,010 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:56,010 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:56,011 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:56,011 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:56,011 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:56,015 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:56,015 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:56,016 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:56,018 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:47:56,335 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T11:47:56,343 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:57,336 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:57,343 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T11:47:58,314 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/MasterData/WALs/7adf9b3d9d04,45917,1731671243600/7adf9b3d9d04%2C45917%2C1731671243600.1731671243971 after 4002ms 2024-11-15T11:47:58,314 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/MasterData/WALs/7adf9b3d9d04,45917,1731671243600/7adf9b3d9d04%2C45917%2C1731671243600.1731671243971 to hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/MasterData/oldWALs/7adf9b3d9d04%2C45917%2C1731671243600.1731671243971 2024-11-15T11:47:58,317 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/MasterData/oldWALs/7adf9b3d9d04%2C45917%2C1731671243600.1731671243971 to hdfs://localhost:37741/user/jenkins/test-data/736a7af1-5940-de88-7596-ba0089bd2c8c/oldWALs/7adf9b3d9d04%2C45917%2C1731671243600.1731671243971$masterlocalwal$ 2024-11-15T11:47:58,317 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-15T11:47:58,317 INFO [M:0;7adf9b3d9d04:45917 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-15T11:47:58,317 INFO [M:0;7adf9b3d9d04:45917 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45917 2024-11-15T11:47:58,318 INFO [M:0;7adf9b3d9d04:45917 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T11:47:58,337 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:58,344 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T11:47:58,454 INFO [M:0;7adf9b3d9d04:45917 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T11:47:58,454 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45917-0x1013f9bce050000, quorum=127.0.0.1:53987, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T11:47:58,454 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45917-0x1013f9bce050000, quorum=127.0.0.1:53987, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T11:47:58,456 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@39e17fd3{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T11:47:58,457 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5b16a6e2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T11:47:58,457 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T11:47:58,457 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fc558ce{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T11:47:58,457 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@160f7ba9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/hadoop.log.dir/,STOPPED} 2024-11-15T11:47:58,458 WARN [BP-1151629165-172.17.0.2-1731671241800 heartbeating to localhost/127.0.0.1:37741 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T11:47:58,458 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-15T11:47:58,458 WARN [BP-1151629165-172.17.0.2-1731671241800 heartbeating to localhost/127.0.0.1:37741 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1151629165-172.17.0.2-1731671241800 (Datanode Uuid 258b2754-7809-4dff-9d5a-1909f1bd1595) service to localhost/127.0.0.1:37741 2024-11-15T11:47:58,458 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T11:47:58,459 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/cluster_a2bb061e-96ca-9eeb-27a3-0e3501365403/data/data3/current/BP-1151629165-172.17.0.2-1731671241800 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T11:47:58,459 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/cluster_a2bb061e-96ca-9eeb-27a3-0e3501365403/data/data4/current/BP-1151629165-172.17.0.2-1731671241800 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T11:47:58,460 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T11:47:58,462 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@48919a97{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T11:47:58,462 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4778d192{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T11:47:58,462 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T11:47:58,462 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6d237fac{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T11:47:58,463 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a79eeb5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/hadoop.log.dir/,STOPPED} 2024-11-15T11:47:58,464 WARN [BP-1151629165-172.17.0.2-1731671241800 heartbeating to localhost/127.0.0.1:37741 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T11:47:58,464 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-15T11:47:58,464 WARN [BP-1151629165-172.17.0.2-1731671241800 heartbeating to localhost/127.0.0.1:37741 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1151629165-172.17.0.2-1731671241800 (Datanode Uuid 7f3e0bea-514b-4e1b-8723-3bd65959348a) service to localhost/127.0.0.1:37741 2024-11-15T11:47:58,464 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T11:47:58,465 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/cluster_a2bb061e-96ca-9eeb-27a3-0e3501365403/data/data1/current/BP-1151629165-172.17.0.2-1731671241800 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T11:47:58,465 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/cluster_a2bb061e-96ca-9eeb-27a3-0e3501365403/data/data2/current/BP-1151629165-172.17.0.2-1731671241800 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T11:47:58,465 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T11:47:58,472 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@50fbcccf{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T11:47:58,472 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6699fa8b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T11:47:58,472 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T11:47:58,473 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@179d1ca6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T11:47:58,473 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@624b3986{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/hadoop.log.dir/,STOPPED} 2024-11-15T11:47:58,479 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-15T11:47:58,501 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-15T11:47:58,510 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=179 (was 154) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37741 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37741 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:37741 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37741 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37741 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37741 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:37741 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37741 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=455 (was 450) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=252 (was 264), ProcessCount=11 (was 11), AvailableMemoryMB=10514 (was 10792) 2024-11-15T11:47:58,517 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=179, OpenFileDescriptor=455, MaxFileDescriptor=1048576, SystemLoadAverage=252, ProcessCount=11, AvailableMemoryMB=10514 2024-11-15T11:47:58,518 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-15T11:47:58,518 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/hadoop.log.dir so I do NOT create it in target/test-data/a1fa362b-779e-af5b-136a-ccaedfa8618d 2024-11-15T11:47:58,518 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8e8e0780-4f08-c53b-5edc-25ec7064f3b0/hadoop.tmp.dir so I do NOT create it in target/test-data/a1fa362b-779e-af5b-136a-ccaedfa8618d 2024-11-15T11:47:58,518 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a1fa362b-779e-af5b-136a-ccaedfa8618d/cluster_b6eeed7d-7a31-bb17-da52-edb8570b3770, deleteOnExit=true 2024-11-15T11:47:58,518 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-15T11:47:58,518 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a1fa362b-779e-af5b-136a-ccaedfa8618d/test.cache.data in system properties and HBase conf 2024-11-15T11:47:58,518 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a1fa362b-779e-af5b-136a-ccaedfa8618d/hadoop.tmp.dir in system properties and HBase conf 2024-11-15T11:47:58,518 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a1fa362b-779e-af5b-136a-ccaedfa8618d/hadoop.log.dir in system properties and HBase conf 2024-11-15T11:47:58,519 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a1fa362b-779e-af5b-136a-ccaedfa8618d/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-15T11:47:58,519 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a1fa362b-779e-af5b-136a-ccaedfa8618d/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-15T11:47:58,519 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-15T11:47:58,519 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-15T11:47:58,519 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a1fa362b-779e-af5b-136a-ccaedfa8618d/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-15T11:47:58,519 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a1fa362b-779e-af5b-136a-ccaedfa8618d/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-15T11:47:58,519 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a1fa362b-779e-af5b-136a-ccaedfa8618d/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-15T11:47:58,519 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a1fa362b-779e-af5b-136a-ccaedfa8618d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T11:47:58,519 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a1fa362b-779e-af5b-136a-ccaedfa8618d/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-15T11:47:58,519 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a1fa362b-779e-af5b-136a-ccaedfa8618d/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-15T11:47:58,520 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a1fa362b-779e-af5b-136a-ccaedfa8618d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T11:47:58,520 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a1fa362b-779e-af5b-136a-ccaedfa8618d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T11:47:58,520 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a1fa362b-779e-af5b-136a-ccaedfa8618d/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-15T11:47:58,520 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a1fa362b-779e-af5b-136a-ccaedfa8618d/nfs.dump.dir in system properties and HBase conf 2024-11-15T11:47:58,520 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a1fa362b-779e-af5b-136a-ccaedfa8618d/java.io.tmpdir in system properties and HBase conf 2024-11-15T11:47:58,520 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a1fa362b-779e-af5b-136a-ccaedfa8618d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T11:47:58,520 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a1fa362b-779e-af5b-136a-ccaedfa8618d/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-15T11:47:58,520 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a1fa362b-779e-af5b-136a-ccaedfa8618d/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-15T11:47:58,534 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-15T11:47:58,789 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T11:47:58,793 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T11:47:58,795 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T11:47:58,795 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T11:47:58,795 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T11:47:58,795 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T11:47:58,796 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6da95783{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a1fa362b-779e-af5b-136a-ccaedfa8618d/hadoop.log.dir/,AVAILABLE} 2024-11-15T11:47:58,796 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@df163d0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T11:47:58,902 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@30d9f702{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a1fa362b-779e-af5b-136a-ccaedfa8618d/java.io.tmpdir/jetty-localhost-43393-hadoop-hdfs-3_4_1-tests_jar-_-any-4351336454075512097/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T11:47:58,902 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6d483d07{HTTP/1.1, (http/1.1)}{localhost:43393} 2024-11-15T11:47:58,902 INFO [Time-limited test {}] server.Server(415): Started @194656ms 2024-11-15T11:47:58,916 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-15T11:47:59,111 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T11:47:59,115 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T11:47:59,116 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T11:47:59,116 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T11:47:59,116 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T11:47:59,117 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@278dab99{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a1fa362b-779e-af5b-136a-ccaedfa8618d/hadoop.log.dir/,AVAILABLE} 2024-11-15T11:47:59,117 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@e6bebf5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T11:47:59,229 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@e4582a5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a1fa362b-779e-af5b-136a-ccaedfa8618d/java.io.tmpdir/jetty-localhost-34419-hadoop-hdfs-3_4_1-tests_jar-_-any-7104969564404745380/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T11:47:59,230 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@37d74326{HTTP/1.1, (http/1.1)}{localhost:34419} 2024-11-15T11:47:59,230 INFO [Time-limited test {}] server.Server(415): Started @194984ms 2024-11-15T11:47:59,231 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T11:47:59,260 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T11:47:59,263 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T11:47:59,264 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T11:47:59,264 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T11:47:59,264 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T11:47:59,264 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7009eb0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a1fa362b-779e-af5b-136a-ccaedfa8618d/hadoop.log.dir/,AVAILABLE} 2024-11-15T11:47:59,265 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@65345c29{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T11:47:59,337 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] 
... 11 more 2024-11-15T11:47:59,344 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:47:59,373 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@14b98ef8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a1fa362b-779e-af5b-136a-ccaedfa8618d/java.io.tmpdir/jetty-localhost-33033-hadoop-hdfs-3_4_1-tests_jar-_-any-14054359795443299213/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T11:47:59,374 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7fc2e521{HTTP/1.1, (http/1.1)}{localhost:33033} 2024-11-15T11:47:59,374 INFO [Time-limited test {}] server.Server(415): Started @195128ms 2024-11-15T11:47:59,375 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-15T11:47:59,979 WARN [Thread-1657 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a1fa362b-779e-af5b-136a-ccaedfa8618d/cluster_b6eeed7d-7a31-bb17-da52-edb8570b3770/data/data1/current/BP-166489227-172.17.0.2-1731671278544/current, will proceed with Du for space computation calculation, 2024-11-15T11:47:59,979 WARN [Thread-1658 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a1fa362b-779e-af5b-136a-ccaedfa8618d/cluster_b6eeed7d-7a31-bb17-da52-edb8570b3770/data/data2/current/BP-166489227-172.17.0.2-1731671278544/current, will proceed with Du for space computation calculation, 2024-11-15T11:48:00,000 WARN [Thread-1621 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-15T11:48:00,003 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x15e4b4b80905936c with lease ID 0xc924bfcf9dfd59f5: Processing first storage report for DS-19634500-898d-4b90-8e7f-1cdb601577ee from datanode DatanodeRegistration(127.0.0.1:46719, datanodeUuid=ad5c07a1-4fad-4172-a5cb-cf56a648de9d, infoPort=42767, infoSecurePort=0, ipcPort=38721, storageInfo=lv=-57;cid=testClusterID;nsid=1399626212;c=1731671278544) 2024-11-15T11:48:00,003 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x15e4b4b80905936c with lease ID 0xc924bfcf9dfd59f5: from storage DS-19634500-898d-4b90-8e7f-1cdb601577ee node DatanodeRegistration(127.0.0.1:46719, datanodeUuid=ad5c07a1-4fad-4172-a5cb-cf56a648de9d, infoPort=42767, infoSecurePort=0, ipcPort=38721, storageInfo=lv=-57;cid=testClusterID;nsid=1399626212;c=1731671278544), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T11:48:00,003 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x15e4b4b80905936c with lease ID 0xc924bfcf9dfd59f5: Processing first storage report for DS-dc197076-ff6e-4be7-9e61-ee1de32f25d0 from datanode DatanodeRegistration(127.0.0.1:46719, datanodeUuid=ad5c07a1-4fad-4172-a5cb-cf56a648de9d, infoPort=42767, infoSecurePort=0, ipcPort=38721, storageInfo=lv=-57;cid=testClusterID;nsid=1399626212;c=1731671278544) 2024-11-15T11:48:00,003 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x15e4b4b80905936c with lease ID 0xc924bfcf9dfd59f5: from storage DS-dc197076-ff6e-4be7-9e61-ee1de32f25d0 node DatanodeRegistration(127.0.0.1:46719, datanodeUuid=ad5c07a1-4fad-4172-a5cb-cf56a648de9d, infoPort=42767, infoSecurePort=0, ipcPort=38721, storageInfo=lv=-57;cid=testClusterID;nsid=1399626212;c=1731671278544), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T11:48:00,163 WARN [Thread-1668 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a1fa362b-779e-af5b-136a-ccaedfa8618d/cluster_b6eeed7d-7a31-bb17-da52-edb8570b3770/data/data3/current/BP-166489227-172.17.0.2-1731671278544/current, will proceed with Du for space computation calculation, 2024-11-15T11:48:00,163 WARN [Thread-1669 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a1fa362b-779e-af5b-136a-ccaedfa8618d/cluster_b6eeed7d-7a31-bb17-da52-edb8570b3770/data/data4/current/BP-166489227-172.17.0.2-1731671278544/current, will proceed with Du for space computation calculation, 2024-11-15T11:48:00,184 WARN [Thread-1644 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-15T11:48:00,186 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2d9e1bdf7012311b with lease ID 0xc924bfcf9dfd59f6: Processing first storage report for DS-e360ecf4-5bdf-4b4d-9a46-ba0307a08244 from datanode DatanodeRegistration(127.0.0.1:44127, datanodeUuid=91b76560-df27-4a94-a756-fc127c9aa7ad, infoPort=40305, infoSecurePort=0, ipcPort=42777, storageInfo=lv=-57;cid=testClusterID;nsid=1399626212;c=1731671278544) 2024-11-15T11:48:00,186 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2d9e1bdf7012311b with lease ID 0xc924bfcf9dfd59f6: from storage DS-e360ecf4-5bdf-4b4d-9a46-ba0307a08244 node DatanodeRegistration(127.0.0.1:44127, datanodeUuid=91b76560-df27-4a94-a756-fc127c9aa7ad, infoPort=40305, infoSecurePort=0, ipcPort=42777, storageInfo=lv=-57;cid=testClusterID;nsid=1399626212;c=1731671278544), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-15T11:48:00,186 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2d9e1bdf7012311b with lease ID 0xc924bfcf9dfd59f6: Processing first storage report for DS-7b9665d2-e375-4071-b25d-c00050306455 from datanode DatanodeRegistration(127.0.0.1:44127, datanodeUuid=91b76560-df27-4a94-a756-fc127c9aa7ad, infoPort=40305, infoSecurePort=0, ipcPort=42777, storageInfo=lv=-57;cid=testClusterID;nsid=1399626212;c=1731671278544) 2024-11-15T11:48:00,186 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2d9e1bdf7012311b with lease ID 0xc924bfcf9dfd59f6: from storage DS-7b9665d2-e375-4071-b25d-c00050306455 node DatanodeRegistration(127.0.0.1:44127, datanodeUuid=91b76560-df27-4a94-a756-fc127c9aa7ad, infoPort=40305, infoSecurePort=0, ipcPort=42777, storageInfo=lv=-57;cid=testClusterID;nsid=1399626212;c=1731671278544), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T11:48:00,203 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a1fa362b-779e-af5b-136a-ccaedfa8618d 2024-11-15T11:48:00,205 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a1fa362b-779e-af5b-136a-ccaedfa8618d/cluster_b6eeed7d-7a31-bb17-da52-edb8570b3770/zookeeper_0, clientPort=62622, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a1fa362b-779e-af5b-136a-ccaedfa8618d/cluster_b6eeed7d-7a31-bb17-da52-edb8570b3770/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a1fa362b-779e-af5b-136a-ccaedfa8618d/cluster_b6eeed7d-7a31-bb17-da52-edb8570b3770/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-15T11:48:00,206 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62622 2024-11-15T11:48:00,207 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:48:00,208 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:48:00,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46719 is added to blk_1073741825_1001 (size=7) 2024-11-15T11:48:00,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44127 is added to blk_1073741825_1001 (size=7) 2024-11-15T11:48:00,220 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a with version=8 2024-11-15T11:48:00,220 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/hbase-staging 2024-11-15T11:48:00,222 INFO [Time-limited test {}] client.ConnectionUtils(128): master/7adf9b3d9d04:0 server-side Connection retries=45 2024-11-15T11:48:00,223 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T11:48:00,223 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T11:48:00,223 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T11:48:00,223 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T11:48:00,223 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T11:48:00,223 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-15T11:48:00,223 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T11:48:00,224 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42211 2024-11-15T11:48:00,226 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42211 connecting to ZooKeeper ensemble=127.0.0.1:62622 2024-11-15T11:48:00,278 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:422110x0, quorum=127.0.0.1:62622, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null
2024-11-15T11:48:00,279 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42211-0x1013f9c5d120000 connected
2024-11-15T11:48:00,338 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-15T11:48:00,345 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-15T11:48:00,352 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-15T11:48:00,355 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-15T11:48:00,358 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42211-0x1013f9c5d120000, quorum=127.0.0.1:62622, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-15T11:48:00,358 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a, hbase.cluster.distributed=false
2024-11-15T11:48:00,361 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42211-0x1013f9c5d120000, quorum=127.0.0.1:62622, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-15T11:48:00,361 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42211
2024-11-15T11:48:00,361 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42211
2024-11-15T11:48:00,362 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42211
2024-11-15T11:48:00,362 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42211
2024-11-15T11:48:00,362 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with
threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42211 2024-11-15T11:48:00,377 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7adf9b3d9d04:0 server-side Connection retries=45 2024-11-15T11:48:00,377 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T11:48:00,377 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T11:48:00,378 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T11:48:00,378 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T11:48:00,378 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T11:48:00,378 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-15T11:48:00,378 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T11:48:00,379 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44361 2024-11-15T11:48:00,380 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44361 connecting to ZooKeeper ensemble=127.0.0.1:62622 2024-11-15T11:48:00,381 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:48:00,382 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:48:00,393 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:443610x0, quorum=127.0.0.1:62622, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T11:48:00,394 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:443610x0, quorum=127.0.0.1:62622, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T11:48:00,394 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44361-0x1013f9c5d120001 connected 2024-11-15T11:48:00,394 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-15T11:48:00,395 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-15T11:48:00,395 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44361-0x1013f9c5d120001, quorum=127.0.0.1:62622, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-15T11:48:00,396 DEBUG [Time-limited test {}] 
zookeeper.ZKUtil(113): regionserver:44361-0x1013f9c5d120001, quorum=127.0.0.1:62622, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T11:48:00,397 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44361 2024-11-15T11:48:00,397 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44361 2024-11-15T11:48:00,397 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44361 2024-11-15T11:48:00,398 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44361 2024-11-15T11:48:00,398 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44361 2024-11-15T11:48:00,411 DEBUG [M:0;7adf9b3d9d04:42211 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7adf9b3d9d04:42211 2024-11-15T11:48:00,411 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/7adf9b3d9d04,42211,1731671280222 2024-11-15T11:48:00,418 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44361-0x1013f9c5d120001, quorum=127.0.0.1:62622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T11:48:00,418 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42211-0x1013f9c5d120000, quorum=127.0.0.1:62622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T11:48:00,419 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42211-0x1013f9c5d120000, quorum=127.0.0.1:62622, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/7adf9b3d9d04,42211,1731671280222 2024-11-15T11:48:00,427 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44361-0x1013f9c5d120001, quorum=127.0.0.1:62622, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-15T11:48:00,427 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42211-0x1013f9c5d120000, quorum=127.0.0.1:62622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:48:00,427 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44361-0x1013f9c5d120001, quorum=127.0.0.1:62622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:48:00,427 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42211-0x1013f9c5d120000, quorum=127.0.0.1:62622, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-15T11:48:00,428 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7adf9b3d9d04,42211,1731671280222 from backup master directory 2024-11-15T11:48:00,435 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42211-0x1013f9c5d120000, quorum=127.0.0.1:62622, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, 
path=/hbase/backup-masters/7adf9b3d9d04,42211,1731671280222 2024-11-15T11:48:00,435 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44361-0x1013f9c5d120001, quorum=127.0.0.1:62622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T11:48:00,435 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42211-0x1013f9c5d120000, quorum=127.0.0.1:62622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T11:48:00,435 WARN [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-15T11:48:00,435 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7adf9b3d9d04,42211,1731671280222 2024-11-15T11:48:00,440 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/hbase.id] with ID: 5dc1ca8a-b035-4ed8-9dda-94b81fa102a9 2024-11-15T11:48:00,440 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/.tmp/hbase.id 2024-11-15T11:48:00,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46719 is added to blk_1073741826_1002 (size=42) 2024-11-15T11:48:00,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44127 is added to blk_1073741826_1002 (size=42) 2024-11-15T11:48:00,448 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/.tmp/hbase.id]:[hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/hbase.id] 2024-11-15T11:48:00,460 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:48:00,460 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-15T11:48:00,461 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
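
Note on the two WARN [Close-WAL-Writer-0] stack traces above: RecoverLeaseFSUtils is probing whether an old WAL file on HDFS is already closed as part of lease recovery. The frames show the probe going through java.lang.reflect.Method.invoke into DistributedFileSystem.isFileClosed; because the test's DFSClient has already been shut down, DFSClient.checkOpen() throws IOException: Filesystem closed, and reflection wraps that in the InvocationTargetException that gets logged. The following is a minimal, hypothetical sketch of that reflective-probe pattern only, not the project's actual RecoverLeaseFSUtils code; the class name and error handling are invented for illustration.

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class IsFileClosedProbe {

  // Returns true only if the filesystem positively reports the file as closed.
  // When the underlying DFSClient has already been shut down, checkOpen()
  // throws IOException("Filesystem closed"); through reflection that surfaces
  // as an InvocationTargetException, which is what the WARN entries above
  // record before the probe gives up.
  static boolean isFileClosed(FileSystem fs, Path path) {
    try {
      Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) isFileClosed.invoke(fs, path);
    } catch (NoSuchMethodException e) {
      return false; // this FileSystem implementation exposes no isFileClosed()
    } catch (IllegalAccessException e) {
      return false;
    } catch (InvocationTargetException e) {
      // e.getCause() is the real failure, e.g. java.io.IOException: Filesystem closed
      return false;
    }
  }
}

Reflection is presumably used here because isFileClosed is exposed by DistributedFileSystem rather than the generic FileSystem API, so the caller cannot depend on it directly.
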
2024-11-15T11:48:00,468 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44361-0x1013f9c5d120001, quorum=127.0.0.1:62622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:48:00,468 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42211-0x1013f9c5d120000, quorum=127.0.0.1:62622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:48:00,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44127 is added to blk_1073741827_1003 (size=196) 2024-11-15T11:48:00,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46719 is added to blk_1073741827_1003 (size=196) 2024-11-15T11:48:00,475 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T11:48:00,476 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-15T11:48:00,477 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T11:48:00,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44127 is added to blk_1073741828_1004 (size=1189) 2024-11-15T11:48:00,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46719 is added to blk_1073741828_1004 (size=1189) 2024-11-15T11:48:00,486 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/MasterData/data/master/store 2024-11-15T11:48:00,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44127 is added to blk_1073741829_1005 (size=34) 2024-11-15T11:48:00,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46719 is added to blk_1073741829_1005 (size=34) 2024-11-15T11:48:00,495 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:48:00,495 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T11:48:00,495 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:48:00,495 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:48:00,496 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T11:48:00,496 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:48:00,496 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
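
The master:store descriptor dumped above (families info, proc, rs and state with their VERSIONS, BLOOMFILTER, DATA_BLOCK_ENCODING and BLOCKSIZE attributes) can be restated through the public TableDescriptorBuilder / ColumnFamilyDescriptorBuilder API. The sketch below is illustrative only: it builds just the 'info' and 'proc' families with the attributes shown in the log, the table name is a placeholder, and this is not the code path MasterRegion itself uses.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public final class StoreDescriptorSketch {

  public static TableDescriptor build() {
    // Mirrors the 'info' family attributes dumped in the log:
    // VERSIONS=3, IN_MEMORY=true, BLOOMFILTER=ROWCOL,
    // DATA_BLOCK_ENCODING=ROW_INDEX_V1, BLOCKSIZE=8192.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setInMemory(true)
        .setBloomFilterType(BloomType.ROWCOL)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBlocksize(8192)
        .build();

    // 'proc', 'rs' and 'state' use the plainer settings shown in the log
    // (VERSIONS=1, BLOOMFILTER=ROW, 64 KB blocks); only 'proc' is shown here.
    ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("proc"))
        .setMaxVersions(1)
        .setBloomFilterType(BloomType.ROW)
        .setBlocksize(64 * 1024)
        .build();

    // "example:store" is a placeholder, not the real master:store local region.
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("example:store"))
        .setColumnFamily(info)
        .setColumnFamily(proc)
        .build();
  }
}

In the log itself this descriptor is produced internally by region.MasterRegion(370); the sketch only restates the logged attributes with the builder API.
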
2024-11-15T11:48:00,496 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731671280495Disabling compacts and flushes for region at 1731671280495Disabling writes for close at 1731671280496 (+1 ms)Writing region close event to WAL at 1731671280496Closed at 1731671280496 2024-11-15T11:48:00,501 WARN [master/7adf9b3d9d04:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/MasterData/data/master/store/.initializing 2024-11-15T11:48:00,501 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/MasterData/WALs/7adf9b3d9d04,42211,1731671280222 2024-11-15T11:48:00,503 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7adf9b3d9d04%2C42211%2C1731671280222, suffix=, logDir=hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/MasterData/WALs/7adf9b3d9d04,42211,1731671280222, archiveDir=hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/MasterData/oldWALs, maxLogs=10 2024-11-15T11:48:00,504 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7adf9b3d9d04%2C42211%2C1731671280222.1731671280504 2024-11-15T11:48:00,508 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/MasterData/WALs/7adf9b3d9d04,42211,1731671280222/7adf9b3d9d04%2C42211%2C1731671280222.1731671280504 2024-11-15T11:48:00,509 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40305:40305),(127.0.0.1/127.0.0.1:42767:42767)] 2024-11-15T11:48:00,509 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-15T11:48:00,509 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:48:00,509 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:48:00,509 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:48:00,511 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:48:00,512 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-15T11:48:00,512 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:48:00,512 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:48:00,513 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:48:00,513 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-15T11:48:00,513 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:48:00,514 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:48:00,514 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:48:00,515 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-15T11:48:00,515 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:48:00,515 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:48:00,515 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:48:00,516 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-15T11:48:00,517 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:48:00,517 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:48:00,517 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:48:00,518 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:48:00,518 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:48:00,520 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:48:00,520 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:48:00,521 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-15T11:48:00,522 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:48:00,524 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T11:48:00,524 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=722847, jitterRate=-0.0808536559343338}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-15T11:48:00,525 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731671280510Initializing all the Stores at 1731671280510Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731671280510Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731671280511 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731671280511Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731671280511Cleaning up temporary data from old regions at 1731671280520 (+9 ms)Region opened successfully at 1731671280525 (+5 ms) 2024-11-15T11:48:00,525 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-15T11:48:00,529 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@fdecaa2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7adf9b3d9d04/172.17.0.2:0 2024-11-15T11:48:00,530 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-15T11:48:00,530 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-15T11:48:00,530 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-15T11:48:00,530 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-15T11:48:00,531 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-15T11:48:00,531 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-15T11:48:00,531 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-15T11:48:00,534 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-15T11:48:00,535 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42211-0x1013f9c5d120000, quorum=127.0.0.1:62622, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-15T11:48:00,568 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-15T11:48:00,569 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-15T11:48:00,570 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42211-0x1013f9c5d120000, quorum=127.0.0.1:62622, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-15T11:48:00,577 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-15T11:48:00,577 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-15T11:48:00,578 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42211-0x1013f9c5d120000, quorum=127.0.0.1:62622, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-15T11:48:00,585 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-15T11:48:00,586 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42211-0x1013f9c5d120000, quorum=127.0.0.1:62622, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-15T11:48:00,593 DEBUG 
[master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-15T11:48:00,595 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42211-0x1013f9c5d120000, quorum=127.0.0.1:62622, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-15T11:48:00,601 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-15T11:48:00,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44361-0x1013f9c5d120001, quorum=127.0.0.1:62622, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T11:48:00,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42211-0x1013f9c5d120000, quorum=127.0.0.1:62622, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T11:48:00,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44361-0x1013f9c5d120001, quorum=127.0.0.1:62622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:48:00,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42211-0x1013f9c5d120000, quorum=127.0.0.1:62622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:48:00,610 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=7adf9b3d9d04,42211,1731671280222, sessionid=0x1013f9c5d120000, setting cluster-up flag (Was=false) 2024-11-15T11:48:00,627 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42211-0x1013f9c5d120000, quorum=127.0.0.1:62622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:48:00,627 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44361-0x1013f9c5d120001, quorum=127.0.0.1:62622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:48:00,652 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-15T11:48:00,653 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7adf9b3d9d04,42211,1731671280222 2024-11-15T11:48:00,674 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44361-0x1013f9c5d120001, quorum=127.0.0.1:62622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:48:00,674 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42211-0x1013f9c5d120000, quorum=127.0.0.1:62622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:48:00,702 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-15T11:48:00,702 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7adf9b3d9d04,42211,1731671280222 2024-11-15T11:48:00,704 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-15T11:48:00,706 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-15T11:48:00,706 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-15T11:48:00,706 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-15T11:48:00,706 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7adf9b3d9d04,42211,1731671280222 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-15T11:48:00,707 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7adf9b3d9d04:0, corePoolSize=5, maxPoolSize=5 2024-11-15T11:48:00,708 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7adf9b3d9d04:0, corePoolSize=5, maxPoolSize=5 2024-11-15T11:48:00,708 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7adf9b3d9d04:0, corePoolSize=5, maxPoolSize=5 2024-11-15T11:48:00,708 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7adf9b3d9d04:0, corePoolSize=5, maxPoolSize=5 2024-11-15T11:48:00,708 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7adf9b3d9d04:0, corePoolSize=10, maxPoolSize=10 2024-11-15T11:48:00,708 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:48:00,708 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/7adf9b3d9d04:0, corePoolSize=2, maxPoolSize=2 2024-11-15T11:48:00,708 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7adf9b3d9d04:0, corePoolSize=1, 
maxPoolSize=1 2024-11-15T11:48:00,709 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731671310709 2024-11-15T11:48:00,709 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-15T11:48:00,709 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-15T11:48:00,709 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-15T11:48:00,709 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-15T11:48:00,709 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-15T11:48:00,709 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-15T11:48:00,709 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-15T11:48:00,710 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T11:48:00,710 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-15T11:48:00,710 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-15T11:48:00,710 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-15T11:48:00,710 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-15T11:48:00,710 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-15T11:48:00,710 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-15T11:48:00,710 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7adf9b3d9d04:0:becomeActiveMaster-HFileCleaner.large.0-1731671280710,5,FailOnTimeoutGroup] 2024-11-15T11:48:00,711 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:48:00,711 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 
'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-15T11:48:00,714 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/7adf9b3d9d04:0:becomeActiveMaster-HFileCleaner.small.0-1731671280710,5,FailOnTimeoutGroup] 2024-11-15T11:48:00,714 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-15T11:48:00,715 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-15T11:48:00,715 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-15T11:48:00,715 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
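
Each "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled." line above corresponds to a ScheduledChore registered with the master's ChoreService (LogsCleaner and HFileCleaner every 600000 ms, ReplicationBarrierCleaner every 43200000 ms, SnapshotCleaner every 1800000 ms). A minimal, hypothetical chore is sketched below to show the pattern; the class name and the Stoppable stub are invented for illustration and this is not one of the cleaners the master actually schedules.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ExampleCleanerChore extends ScheduledChore {

  ExampleCleanerChore(Stoppable stopper, int periodMillis) {
    super("ExampleCleanerChore", stopper, periodMillis);
  }

  @Override
  protected void chore() {
    // periodic work goes here, e.g. scanning a directory for expired files
  }

  public static void main(String[] args) {
    // Trivial Stoppable stub so the example is self-contained.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService service = new ChoreService("example");
    // period=600000 ms mirrors the LogsCleaner/HFileCleaner schedule in the log.
    service.scheduleChore(new ExampleCleanerChore(stopper, 600000));
    // Shut down immediately here only to keep the sketch terminating;
    // in a real server the ChoreService runs until the process stops.
    service.shutdown();
  }
}
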
2024-11-15T11:48:00,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46719 is added to blk_1073741831_1007 (size=1321) 2024-11-15T11:48:00,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44127 is added to blk_1073741831_1007 (size=1321) 2024-11-15T11:48:00,730 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-15T11:48:00,730 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a 2024-11-15T11:48:00,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46719 is added to blk_1073741832_1008 (size=32) 2024-11-15T11:48:00,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44127 is added to blk_1073741832_1008 (size=32) 2024-11-15T11:48:00,750 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:48:00,751 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T11:48:00,752 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T11:48:00,752 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:48:00,753 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:48:00,753 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T11:48:00,754 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T11:48:00,754 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:48:00,754 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:48:00,754 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T11:48:00,755 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T11:48:00,755 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:48:00,756 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:48:00,756 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T11:48:00,757 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T11:48:00,757 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:48:00,758 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:48:00,758 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T11:48:00,758 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/hbase/meta/1588230740 2024-11-15T11:48:00,759 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/hbase/meta/1588230740 2024-11-15T11:48:00,760 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T11:48:00,760 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T11:48:00,760 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
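The FlushLargeStoresPolicy line above notes that, with no hbase.hregion.percolumnfamilyflush.size.lower.bound configured, the per-family lower bound falls back to the region's memstore flush size divided by its number of column families; hbase:meta has four families (info, ns, rep_barrier, table), and the region is opened a few lines below with flushSizeLowerBound=16777216, the logged 16.0 M. A small sketch of that fallback; the 64 MiB flush size is inferred from the logged numbers, not stated in the log:

```java
// Fallback described by FlushLargeStoresPolicy:
// per-family lower bound = memstore flush heap size / number of column families.
public class PerFamilyFlushLowerBound {
    public static void main(String[] args) {
        long memStoreFlushHeapSize = 64L * 1024 * 1024;  // assumed 64 MiB, inferred from the log
        int columnFamilies = 4;                          // info, ns, rep_barrier, table

        long lowerBound = memStoreFlushHeapSize / columnFamilies;
        System.out.println(lowerBound);                  // 16777216, i.e. the logged 16.0 M
    }
}
```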
2024-11-15T11:48:00,761 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T11:48:00,763 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T11:48:00,764 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=818354, jitterRate=0.04059125483036041}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T11:48:00,764 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731671280750Initializing all the Stores at 1731671280751 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731671280751Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731671280751Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731671280751Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731671280751Cleaning up temporary data from old regions at 1731671280760 (+9 ms)Region opened successfully at 1731671280764 (+4 ms) 2024-11-15T11:48:00,764 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T11:48:00,764 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T11:48:00,764 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T11:48:00,764 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T11:48:00,764 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T11:48:00,765 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T11:48:00,765 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731671280764Disabling compacts and flushes for region at 1731671280764Disabling writes for close at 1731671280764Writing region close 
event to WAL at 1731671280765 (+1 ms)Closed at 1731671280765 2024-11-15T11:48:00,766 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T11:48:00,766 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-15T11:48:00,766 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-15T11:48:00,768 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T11:48:00,769 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-15T11:48:00,800 INFO [RS:0;7adf9b3d9d04:44361 {}] regionserver.HRegionServer(746): ClusterId : 5dc1ca8a-b035-4ed8-9dda-94b81fa102a9 2024-11-15T11:48:00,800 DEBUG [RS:0;7adf9b3d9d04:44361 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-15T11:48:00,819 DEBUG [RS:0;7adf9b3d9d04:44361 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-15T11:48:00,819 DEBUG [RS:0;7adf9b3d9d04:44361 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-15T11:48:00,827 DEBUG [RS:0;7adf9b3d9d04:44361 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-15T11:48:00,828 DEBUG [RS:0;7adf9b3d9d04:44361 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@283c5a42, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7adf9b3d9d04/172.17.0.2:0 2024-11-15T11:48:00,840 DEBUG [RS:0;7adf9b3d9d04:44361 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;7adf9b3d9d04:44361 2024-11-15T11:48:00,841 INFO [RS:0;7adf9b3d9d04:44361 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-15T11:48:00,841 INFO [RS:0;7adf9b3d9d04:44361 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-15T11:48:00,841 DEBUG [RS:0;7adf9b3d9d04:44361 {}] regionserver.HRegionServer(832): About to register with Master. 
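The "Opened 1588230740" line above reports a SteppingSplitPolicy wrapping ConstantSizeRegionSplitPolicy{desiredMaxFileSize=818354, jitterRate=0.04059125483036041}. Those figures are consistent with scaling the configured maximum file size (786432 bytes, the value quoted by the MAX_FILESIZE warning near the end of this log) by 1 + jitterRate. A standalone sketch of that relationship; the exact rounding HBase applies is an assumption, but the arithmetic lands on the logged value:

```java
// Jittered split threshold consistent with the "Opened 1588230740" log line.
public class JitteredSplitSize {
    public static void main(String[] args) {
        long configuredMaxFileSize = 786_432L;     // "hbase.hregion.max.filesize" per the warning below
        double jitterRate = 0.04059125483036041;   // jitterRate from the log

        long desiredMaxFileSize = (long) (configuredMaxFileSize * (1.0 + jitterRate));
        System.out.println(desiredMaxFileSize);    // 818354, as logged
    }
}
```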
2024-11-15T11:48:00,841 INFO [RS:0;7adf9b3d9d04:44361 {}] regionserver.HRegionServer(2659): reportForDuty to master=7adf9b3d9d04,42211,1731671280222 with port=44361, startcode=1731671280377 2024-11-15T11:48:00,842 DEBUG [RS:0;7adf9b3d9d04:44361 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-15T11:48:00,844 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52955, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-15T11:48:00,844 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42211 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 7adf9b3d9d04,44361,1731671280377 2024-11-15T11:48:00,844 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42211 {}] master.ServerManager(517): Registering regionserver=7adf9b3d9d04,44361,1731671280377 2024-11-15T11:48:00,846 DEBUG [RS:0;7adf9b3d9d04:44361 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a 2024-11-15T11:48:00,846 DEBUG [RS:0;7adf9b3d9d04:44361 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38977 2024-11-15T11:48:00,846 DEBUG [RS:0;7adf9b3d9d04:44361 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-15T11:48:00,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42211-0x1013f9c5d120000, quorum=127.0.0.1:62622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T11:48:00,857 DEBUG [RS:0;7adf9b3d9d04:44361 {}] zookeeper.ZKUtil(111): regionserver:44361-0x1013f9c5d120001, quorum=127.0.0.1:62622, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7adf9b3d9d04,44361,1731671280377 2024-11-15T11:48:00,857 WARN [RS:0;7adf9b3d9d04:44361 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-15T11:48:00,857 INFO [RS:0;7adf9b3d9d04:44361 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T11:48:00,857 DEBUG [RS:0;7adf9b3d9d04:44361 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/WALs/7adf9b3d9d04,44361,1731671280377 2024-11-15T11:48:00,858 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7adf9b3d9d04,44361,1731671280377] 2024-11-15T11:48:00,861 INFO [RS:0;7adf9b3d9d04:44361 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-15T11:48:00,862 INFO [RS:0;7adf9b3d9d04:44361 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-15T11:48:00,863 INFO [RS:0;7adf9b3d9d04:44361 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-15T11:48:00,863 INFO [RS:0;7adf9b3d9d04:44361 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
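The MemStoreFlusher line above pairs globalMemStoreLimit=880 M with globalMemStoreLimitLowMark=836 M, i.e. the low-water mark sits at 95% of the global limit. A trivial sketch of that ratio; the 0.95 factor is read off the logged numbers and assumed to be the configured lower-limit fraction:

```java
// Low-water mark reported by MemStoreFlusher is 95% of the global memstore limit.
public class MemStoreLowWaterMark {
    public static void main(String[] args) {
        long globalMemStoreLimitMb = 880;   // globalMemStoreLimit=880 M
        double lowerLimitFraction = 0.95;   // assumed lower-limit fraction

        long lowMarkMb = Math.round(globalMemStoreLimitMb * lowerLimitFraction);
        System.out.println(lowMarkMb);      // 836, matching globalMemStoreLimitLowMark=836 M
    }
}
```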
2024-11-15T11:48:00,863 INFO [RS:0;7adf9b3d9d04:44361 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-15T11:48:00,864 INFO [RS:0;7adf9b3d9d04:44361 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-15T11:48:00,864 INFO [RS:0;7adf9b3d9d04:44361 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-15T11:48:00,864 DEBUG [RS:0;7adf9b3d9d04:44361 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:48:00,864 DEBUG [RS:0;7adf9b3d9d04:44361 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:48:00,864 DEBUG [RS:0;7adf9b3d9d04:44361 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:48:00,864 DEBUG [RS:0;7adf9b3d9d04:44361 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:48:00,864 DEBUG [RS:0;7adf9b3d9d04:44361 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:48:00,864 DEBUG [RS:0;7adf9b3d9d04:44361 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7adf9b3d9d04:0, corePoolSize=2, maxPoolSize=2 2024-11-15T11:48:00,864 DEBUG [RS:0;7adf9b3d9d04:44361 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:48:00,864 DEBUG [RS:0;7adf9b3d9d04:44361 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:48:00,864 DEBUG [RS:0;7adf9b3d9d04:44361 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:48:00,864 DEBUG [RS:0;7adf9b3d9d04:44361 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:48:00,865 DEBUG [RS:0;7adf9b3d9d04:44361 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:48:00,865 DEBUG [RS:0;7adf9b3d9d04:44361 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:48:00,865 DEBUG [RS:0;7adf9b3d9d04:44361 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7adf9b3d9d04:0, corePoolSize=3, maxPoolSize=3 2024-11-15T11:48:00,865 DEBUG [RS:0;7adf9b3d9d04:44361 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7adf9b3d9d04:0, corePoolSize=3, maxPoolSize=3 2024-11-15T11:48:00,867 INFO [RS:0;7adf9b3d9d04:44361 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
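Each "Starting executor service" line above describes a dedicated worker pool whose core and maximum sizes are equal (mostly 1, a few at 2 or 3). A minimal java.util.concurrent sketch of one such fixed, named pool; it mirrors only the logged sizing, not HBase's own executor.ExecutorService wrapper:

```java
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

// A fixed-size, named pool in the spirit of the logged
// RS_OPEN_META executor (corePoolSize=1, maxPoolSize=1).
public class NamedFixedPool {
    public static void main(String[] args) throws InterruptedException {
        ThreadPoolExecutor openMetaPool = new ThreadPoolExecutor(
                1, 1,                            // corePoolSize=1, maxPoolSize=1
                60L, TimeUnit.SECONDS,           // keep-alive for idle threads
                new LinkedBlockingQueue<>(),     // unbounded work queue
                r -> new Thread(r, "RS_OPEN_META-sketch"));

        openMetaPool.submit(() -> System.out.println(Thread.currentThread().getName()));
        openMetaPool.shutdown();
        openMetaPool.awaitTermination(5, TimeUnit.SECONDS);
    }
}
```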
2024-11-15T11:48:00,867 INFO [RS:0;7adf9b3d9d04:44361 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T11:48:00,867 INFO [RS:0;7adf9b3d9d04:44361 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T11:48:00,867 INFO [RS:0;7adf9b3d9d04:44361 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-15T11:48:00,867 INFO [RS:0;7adf9b3d9d04:44361 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-15T11:48:00,867 INFO [RS:0;7adf9b3d9d04:44361 {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,44361,1731671280377-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T11:48:00,882 INFO [RS:0;7adf9b3d9d04:44361 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-15T11:48:00,882 INFO [RS:0;7adf9b3d9d04:44361 {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,44361,1731671280377-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T11:48:00,882 INFO [RS:0;7adf9b3d9d04:44361 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T11:48:00,882 INFO [RS:0;7adf9b3d9d04:44361 {}] regionserver.Replication(171): 7adf9b3d9d04,44361,1731671280377 started 2024-11-15T11:48:00,897 INFO [RS:0;7adf9b3d9d04:44361 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T11:48:00,897 INFO [RS:0;7adf9b3d9d04:44361 {}] regionserver.HRegionServer(1482): Serving as 7adf9b3d9d04,44361,1731671280377, RpcServer on 7adf9b3d9d04/172.17.0.2:44361, sessionid=0x1013f9c5d120001 2024-11-15T11:48:00,897 DEBUG [RS:0;7adf9b3d9d04:44361 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-15T11:48:00,897 DEBUG [RS:0;7adf9b3d9d04:44361 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7adf9b3d9d04,44361,1731671280377 2024-11-15T11:48:00,897 DEBUG [RS:0;7adf9b3d9d04:44361 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7adf9b3d9d04,44361,1731671280377' 2024-11-15T11:48:00,897 DEBUG [RS:0;7adf9b3d9d04:44361 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-15T11:48:00,898 DEBUG [RS:0;7adf9b3d9d04:44361 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-15T11:48:00,898 DEBUG [RS:0;7adf9b3d9d04:44361 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-15T11:48:00,898 DEBUG [RS:0;7adf9b3d9d04:44361 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-15T11:48:00,898 DEBUG [RS:0;7adf9b3d9d04:44361 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7adf9b3d9d04,44361,1731671280377 2024-11-15T11:48:00,898 DEBUG [RS:0;7adf9b3d9d04:44361 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7adf9b3d9d04,44361,1731671280377' 2024-11-15T11:48:00,898 DEBUG [RS:0;7adf9b3d9d04:44361 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-15T11:48:00,899 DEBUG 
[RS:0;7adf9b3d9d04:44361 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-15T11:48:00,899 DEBUG [RS:0;7adf9b3d9d04:44361 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-15T11:48:00,899 INFO [RS:0;7adf9b3d9d04:44361 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-15T11:48:00,899 INFO [RS:0;7adf9b3d9d04:44361 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-15T11:48:00,919 WARN [7adf9b3d9d04:42211 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-15T11:48:01,002 INFO [RS:0;7adf9b3d9d04:44361 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7adf9b3d9d04%2C44361%2C1731671280377, suffix=, logDir=hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/WALs/7adf9b3d9d04,44361,1731671280377, archiveDir=hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/oldWALs, maxLogs=32 2024-11-15T11:48:01,003 INFO [RS:0;7adf9b3d9d04:44361 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7adf9b3d9d04%2C44361%2C1731671280377.1731671281003 2024-11-15T11:48:01,018 INFO [RS:0;7adf9b3d9d04:44361 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/WALs/7adf9b3d9d04,44361,1731671280377/7adf9b3d9d04%2C44361%2C1731671280377.1731671281003 2024-11-15T11:48:01,019 DEBUG [RS:0;7adf9b3d9d04:44361 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40305:40305),(127.0.0.1/127.0.0.1:42767:42767)] 2024-11-15T11:48:01,169 DEBUG [7adf9b3d9d04:42211 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-15T11:48:01,170 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7adf9b3d9d04,44361,1731671280377 2024-11-15T11:48:01,171 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7adf9b3d9d04,44361,1731671280377, state=OPENING 2024-11-15T11:48:01,201 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-15T11:48:01,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42211-0x1013f9c5d120000, quorum=127.0.0.1:62622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:48:01,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44361-0x1013f9c5d120001, quorum=127.0.0.1:62622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:48:01,211 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T11:48:01,211 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T11:48:01,211 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T11:48:01,211 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=7adf9b3d9d04,44361,1731671280377}]
2024-11-15T11:48:01,338 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-15T11:48:01,346 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-15T11:48:01,365 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-11-15T11:48:01,367 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42057, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-11-15T11:48:01,371 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740
2024-11-15T11:48:01,371 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-15T11:48:01,373 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7adf9b3d9d04%2C44361%2C1731671280377.meta, suffix=.meta, logDir=hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/WALs/7adf9b3d9d04,44361,1731671280377, archiveDir=hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/oldWALs, maxLogs=32
2024-11-15T11:48:01,374 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 7adf9b3d9d04%2C44361%2C1731671280377.meta.1731671281373.meta
2024-11-15T11:48:01,380 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/WALs/7adf9b3d9d04,44361,1731671280377/7adf9b3d9d04%2C44361%2C1731671280377.meta.1731671281373.meta
2024-11-15T11:48:01,389 DEBUG
[RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40305:40305),(127.0.0.1/127.0.0.1:42767:42767)] 2024-11-15T11:48:01,389 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-15T11:48:01,390 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-15T11:48:01,390 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-15T11:48:01,390 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-15T11:48:01,390 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-15T11:48:01,390 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:48:01,390 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-15T11:48:01,390 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-15T11:48:01,392 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T11:48:01,393 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T11:48:01,393 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:48:01,393 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:48:01,393 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T11:48:01,394 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T11:48:01,394 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:48:01,394 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:48:01,394 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T11:48:01,395 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T11:48:01,395 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:48:01,396 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:48:01,396 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T11:48:01,396 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T11:48:01,396 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:48:01,397 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:48:01,397 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T11:48:01,398 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/hbase/meta/1588230740 2024-11-15T11:48:01,399 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/hbase/meta/1588230740 2024-11-15T11:48:01,400 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T11:48:01,400 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T11:48:01,400 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
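Both "WAL configuration" lines above (the region server's default WAL and the meta WAL) report blocksize=256 MB with rollsize=128 MB, i.e. the log is rolled at half the block size. A small sketch of that relationship; the 0.5 multiplier is an assumption consistent with the logged sizes:

```java
// rollsize in the "WAL configuration" lines is half the WAL block size.
public class WalRollSize {
    public static void main(String[] args) {
        long blockSizeBytes = 256L * 1024 * 1024;   // blocksize=256 MB
        double rollMultiplier = 0.5;                // assumed log-roll multiplier

        long rollSizeBytes = (long) (blockSizeBytes * rollMultiplier);
        System.out.println(rollSizeBytes / (1024 * 1024) + " MB");  // 128 MB, as logged
    }
}
```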
2024-11-15T11:48:01,401 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T11:48:01,402 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=799836, jitterRate=0.01704491674900055}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T11:48:01,402 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-15T11:48:01,403 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731671281391Writing region info on filesystem at 1731671281391Initializing all the Stores at 1731671281391Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731671281391Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731671281392 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731671281392Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731671281392Cleaning up temporary data from old regions at 1731671281400 (+8 ms)Running coprocessor post-open hooks at 1731671281402 (+2 ms)Region opened successfully at 1731671281403 (+1 ms) 2024-11-15T11:48:01,404 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731671281364 2024-11-15T11:48:01,407 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-15T11:48:01,407 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-15T11:48:01,408 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=7adf9b3d9d04,44361,1731671280377 2024-11-15T11:48:01,409 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7adf9b3d9d04,44361,1731671280377, state=OPEN 2024-11-15T11:48:01,445 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44361-0x1013f9c5d120001, quorum=127.0.0.1:62622, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T11:48:01,445 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42211-0x1013f9c5d120000, quorum=127.0.0.1:62622, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T11:48:01,445 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=7adf9b3d9d04,44361,1731671280377 2024-11-15T11:48:01,446 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T11:48:01,446 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T11:48:01,450 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-15T11:48:01,450 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=7adf9b3d9d04,44361,1731671280377 in 235 msec 2024-11-15T11:48:01,455 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-15T11:48:01,455 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 684 msec 2024-11-15T11:48:01,456 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T11:48:01,456 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-15T11:48:01,457 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:48:01,458 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7adf9b3d9d04,44361,1731671280377, seqNum=-1] 2024-11-15T11:48:01,458 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:48:01,459 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47055, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T11:48:01,465 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 759 msec 2024-11-15T11:48:01,465 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731671281465, completionTime=-1 2024-11-15T11:48:01,466 INFO 
[master/7adf9b3d9d04:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-15T11:48:01,466 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-15T11:48:01,468 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-15T11:48:01,468 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731671341468 2024-11-15T11:48:01,468 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731671401468 2024-11-15T11:48:01,468 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-15T11:48:01,468 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,42211,1731671280222-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T11:48:01,468 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,42211,1731671280222-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T11:48:01,469 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,42211,1731671280222-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T11:48:01,469 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7adf9b3d9d04:42211, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T11:48:01,469 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-15T11:48:01,469 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-15T11:48:01,471 DEBUG [master/7adf9b3d9d04:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-15T11:48:01,473 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.038sec 2024-11-15T11:48:01,473 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-15T11:48:01,473 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-15T11:48:01,473 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-15T11:48:01,474 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
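The ProcedureExecutor lines above close out the nested bootstrap procedures: OpenRegionProcedure (pid=3) in 235 msec, its parent ASSIGN (pid=2) in 684 msec, and the root InitMetaProcedure (pid=1) in 759 msec, so each parent's elapsed time contains its child's. A short sketch reading those figures as per-level exclusive time, assuming each child interval lies entirely inside its parent's:

```java
// Splits the nested procedure times from the log into per-level exclusive time.
public class ProcedureTimings {
    public static void main(String[] args) {
        long openRegionMs = 235;  // pid=3 OpenRegionProcedure
        long assignMs = 684;      // pid=2 TransitRegionStateProcedure (ASSIGN)
        long initMetaMs = 759;    // pid=1 InitMetaProcedure

        System.out.println("ASSIGN minus OpenRegion: " + (assignMs - openRegionMs) + " ms");  // 449 ms
        System.out.println("InitMeta minus ASSIGN:   " + (initMetaMs - assignMs) + " ms");    // 75 ms
    }
}
```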
2024-11-15T11:48:01,474 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-15T11:48:01,474 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,42211,1731671280222-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T11:48:01,474 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,42211,1731671280222-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-15T11:48:01,476 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-15T11:48:01,476 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-15T11:48:01,476 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,42211,1731671280222-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T11:48:01,500 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a63a5a2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:48:01,500 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 7adf9b3d9d04,42211,-1 for getting cluster id 2024-11-15T11:48:01,500 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T11:48:01,502 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5dc1ca8a-b035-4ed8-9dda-94b81fa102a9' 2024-11-15T11:48:01,503 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T11:48:01,503 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5dc1ca8a-b035-4ed8-9dda-94b81fa102a9" 2024-11-15T11:48:01,503 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@50e31568, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:48:01,503 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [7adf9b3d9d04,42211,-1] 2024-11-15T11:48:01,504 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T11:48:01,504 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:48:01,506 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38202, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T11:48:01,507 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79b06ab7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:48:01,507 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:48:01,509 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7adf9b3d9d04,44361,1731671280377, seqNum=-1] 2024-11-15T11:48:01,509 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:48:01,511 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60290, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T11:48:01,512 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=7adf9b3d9d04,42211,1731671280222 2024-11-15T11:48:01,513 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:48:01,515 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-15T11:48:01,516 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-15T11:48:01,517 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 7adf9b3d9d04,42211,1731671280222 2024-11-15T11:48:01,517 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2f022144 2024-11-15T11:48:01,517 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-15T11:48:01,518 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38208, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-15T11:48:01,519 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42211 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-15T11:48:01,519 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42211 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-15T11:48:01,519 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42211 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T11:48:01,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42211 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-15T11:48:01,521 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-15T11:48:01,521 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:48:01,521 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42211 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-15T11:48:01,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42211 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-15T11:48:01,523 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-15T11:48:01,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46719 is added to blk_1073741835_1011 (size=405) 2024-11-15T11:48:01,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44127 is added to blk_1073741835_1011 (size=405) 2024-11-15T11:48:01,531 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => c51d362e3329d884080dc1c7191a424c, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731671281518.c51d362e3329d884080dc1c7191a424c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a 2024-11-15T11:48:01,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46719 is added to blk_1073741836_1012 (size=88) 2024-11-15T11:48:01,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:44127 is added to blk_1073741836_1012 (size=88) 2024-11-15T11:48:01,540 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731671281518.c51d362e3329d884080dc1c7191a424c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:48:01,540 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing c51d362e3329d884080dc1c7191a424c, disabling compactions & flushes 2024-11-15T11:48:01,540 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731671281518.c51d362e3329d884080dc1c7191a424c. 2024-11-15T11:48:01,540 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731671281518.c51d362e3329d884080dc1c7191a424c. 2024-11-15T11:48:01,540 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731671281518.c51d362e3329d884080dc1c7191a424c. after waiting 0 ms 2024-11-15T11:48:01,540 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731671281518.c51d362e3329d884080dc1c7191a424c. 2024-11-15T11:48:01,540 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731671281518.c51d362e3329d884080dc1c7191a424c. 2024-11-15T11:48:01,540 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for c51d362e3329d884080dc1c7191a424c: Waiting for close lock at 1731671281540Disabling compacts and flushes for region at 1731671281540Disabling writes for close at 1731671281540Writing region close event to WAL at 1731671281540Closed at 1731671281540 2024-11-15T11:48:01,542 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-15T11:48:01,542 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731671281518.c51d362e3329d884080dc1c7191a424c.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1731671281542"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731671281542"}]},"ts":"1731671281542"} 2024-11-15T11:48:01,544 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-15T11:48:01,546 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-15T11:48:01,546 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731671281546"}]},"ts":"1731671281546"} 2024-11-15T11:48:01,549 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-15T11:48:01,550 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=c51d362e3329d884080dc1c7191a424c, ASSIGN}] 2024-11-15T11:48:01,551 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=c51d362e3329d884080dc1c7191a424c, ASSIGN 2024-11-15T11:48:01,553 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=c51d362e3329d884080dc1c7191a424c, ASSIGN; state=OFFLINE, location=7adf9b3d9d04,44361,1731671280377; forceNewPlan=false, retain=false 2024-11-15T11:48:01,703 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=c51d362e3329d884080dc1c7191a424c, regionState=OPENING, regionLocation=7adf9b3d9d04,44361,1731671280377 2024-11-15T11:48:01,707 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=c51d362e3329d884080dc1c7191a424c, ASSIGN because future has completed 2024-11-15T11:48:01,708 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure c51d362e3329d884080dc1c7191a424c, server=7adf9b3d9d04,44361,1731671280377}] 2024-11-15T11:48:01,866 INFO [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731671281518.c51d362e3329d884080dc1c7191a424c. 
2024-11-15T11:48:01,867 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => c51d362e3329d884080dc1c7191a424c, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731671281518.c51d362e3329d884080dc1c7191a424c.', STARTKEY => '', ENDKEY => ''} 2024-11-15T11:48:01,867 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling c51d362e3329d884080dc1c7191a424c 2024-11-15T11:48:01,867 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731671281518.c51d362e3329d884080dc1c7191a424c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:48:01,867 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for c51d362e3329d884080dc1c7191a424c 2024-11-15T11:48:01,867 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for c51d362e3329d884080dc1c7191a424c 2024-11-15T11:48:01,869 INFO [StoreOpener-c51d362e3329d884080dc1c7191a424c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region c51d362e3329d884080dc1c7191a424c 2024-11-15T11:48:01,871 INFO [StoreOpener-c51d362e3329d884080dc1c7191a424c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c51d362e3329d884080dc1c7191a424c columnFamilyName info 2024-11-15T11:48:01,871 DEBUG [StoreOpener-c51d362e3329d884080dc1c7191a424c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:48:01,872 INFO [StoreOpener-c51d362e3329d884080dc1c7191a424c-1 {}] regionserver.HStore(327): Store=c51d362e3329d884080dc1c7191a424c/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:48:01,872 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for c51d362e3329d884080dc1c7191a424c 2024-11-15T11:48:01,873 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c51d362e3329d884080dc1c7191a424c 2024-11-15T11:48:01,874 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c51d362e3329d884080dc1c7191a424c 2024-11-15T11:48:01,874 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for c51d362e3329d884080dc1c7191a424c 2024-11-15T11:48:01,874 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for c51d362e3329d884080dc1c7191a424c 2024-11-15T11:48:01,877 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for c51d362e3329d884080dc1c7191a424c 2024-11-15T11:48:01,879 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c51d362e3329d884080dc1c7191a424c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T11:48:01,880 INFO [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened c51d362e3329d884080dc1c7191a424c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=751843, jitterRate=-0.043983399868011475}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T11:48:01,880 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c51d362e3329d884080dc1c7191a424c 2024-11-15T11:48:01,881 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for c51d362e3329d884080dc1c7191a424c: Running coprocessor pre-open hook at 1731671281868Writing region info on filesystem at 1731671281868Initializing all the Stores at 1731671281869 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731671281869Cleaning up temporary data from old regions at 1731671281874 (+5 ms)Running coprocessor post-open hooks at 1731671281880 (+6 ms)Region opened successfully at 1731671281881 (+1 ms) 2024-11-15T11:48:01,882 INFO [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731671281518.c51d362e3329d884080dc1c7191a424c., pid=6, masterSystemTime=1731671281861 2024-11-15T11:48:01,885 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open 
deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731671281518.c51d362e3329d884080dc1c7191a424c. 2024-11-15T11:48:01,885 INFO [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731671281518.c51d362e3329d884080dc1c7191a424c. 2024-11-15T11:48:01,886 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=c51d362e3329d884080dc1c7191a424c, regionState=OPEN, openSeqNum=2, regionLocation=7adf9b3d9d04,44361,1731671280377 2024-11-15T11:48:01,888 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure c51d362e3329d884080dc1c7191a424c, server=7adf9b3d9d04,44361,1731671280377 because future has completed 2024-11-15T11:48:01,894 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-15T11:48:01,894 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure c51d362e3329d884080dc1c7191a424c, server=7adf9b3d9d04,44361,1731671280377 in 182 msec 2024-11-15T11:48:01,897 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-15T11:48:01,897 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=c51d362e3329d884080dc1c7191a424c, ASSIGN in 345 msec 2024-11-15T11:48:01,898 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-15T11:48:01,898 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731671281898"}]},"ts":"1731671281898"} 2024-11-15T11:48:01,900 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-15T11:48:01,901 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-15T11:48:01,903 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 382 msec 2024-11-15T11:48:02,339 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:02,346 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:03,340 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:03,346 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:04,340 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:04,347 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:05,341 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:05,348 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:06,342 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:06,348 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:06,391 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:06,391 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:06,391 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:06,392 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:06,392 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:06,392 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:06,415 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:06,415 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:06,415 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:06,415 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:06,416 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:06,416 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:06,420 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:06,420 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:06,420 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:06,423 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:06,927 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-15T11:48:06,928 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:06,929 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:06,929 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:06,929 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:06,929 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:06,929 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:06,954 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:06,954 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:06,954 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:06,955 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:06,955 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:06,955 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:06,960 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:06,961 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:06,961 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:06,964 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:06,970 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-15T11:48:06,970 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-15T11:48:07,343 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:07,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:08,344 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:08,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:09,344 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:09,350 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:10,345 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:10,350 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T11:48:11,016 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-15T11:48:11,016 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-15T11:48:11,017 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T11:48:11,017 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-15T11:48:11,017 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-15T11:48:11,017 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-11-15T11:48:11,346 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:11,351 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:11,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42211 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-15T11:48:11,582 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-15T11:48:11,582 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100 2024-11-15T11:48:11,586 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-15T11:48:11,586 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731671281518.c51d362e3329d884080dc1c7191a424c. 
2024-11-15T11:48:11,590 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731671281518.c51d362e3329d884080dc1c7191a424c., hostname=7adf9b3d9d04,44361,1731671280377, seqNum=2] 2024-11-15T11:48:11,597 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42211 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-15T11:48:11,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42211 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-15T11:48:11,602 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-15T11:48:11,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42211 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-15T11:48:11,604 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-15T11:48:11,605 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-15T11:48:11,768 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44361 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-15T11:48:11,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7adf9b3d9d04:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731671281518.c51d362e3329d884080dc1c7191a424c. 
2024-11-15T11:48:11,769 INFO [RS_FLUSH_OPERATIONS-regionserver/7adf9b3d9d04:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing c51d362e3329d884080dc1c7191a424c 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-15T11:48:11,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7adf9b3d9d04:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c51d362e3329d884080dc1c7191a424c/.tmp/info/f4ebdf4da8584e72b2119c14de1d1609 is 1080, key is row0001/info:/1731671291591/Put/seqid=0 2024-11-15T11:48:11,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46719 is added to blk_1073741837_1013 (size=6033) 2024-11-15T11:48:11,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44127 is added to blk_1073741837_1013 (size=6033) 2024-11-15T11:48:11,795 INFO [RS_FLUSH_OPERATIONS-regionserver/7adf9b3d9d04:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c51d362e3329d884080dc1c7191a424c/.tmp/info/f4ebdf4da8584e72b2119c14de1d1609 2024-11-15T11:48:11,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7adf9b3d9d04:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c51d362e3329d884080dc1c7191a424c/.tmp/info/f4ebdf4da8584e72b2119c14de1d1609 as hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c51d362e3329d884080dc1c7191a424c/info/f4ebdf4da8584e72b2119c14de1d1609 2024-11-15T11:48:11,808 INFO [RS_FLUSH_OPERATIONS-regionserver/7adf9b3d9d04:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c51d362e3329d884080dc1c7191a424c/info/f4ebdf4da8584e72b2119c14de1d1609, entries=1, sequenceid=5, filesize=5.9 K 2024-11-15T11:48:11,809 INFO [RS_FLUSH_OPERATIONS-regionserver/7adf9b3d9d04:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for c51d362e3329d884080dc1c7191a424c in 40ms, sequenceid=5, compaction requested=false 2024-11-15T11:48:11,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7adf9b3d9d04:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for c51d362e3329d884080dc1c7191a424c: 2024-11-15T11:48:11,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7adf9b3d9d04:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731671281518.c51d362e3329d884080dc1c7191a424c. 
2024-11-15T11:48:11,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7adf9b3d9d04:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-15T11:48:11,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42211 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-15T11:48:11,816 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-15T11:48:11,816 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 208 msec 2024-11-15T11:48:11,818 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 219 msec 2024-11-15T11:48:12,346 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T11:48:12,351 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:13,347 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:13,352 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T11:48:14,348 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:14,353 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:15,348 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T11:48:15,353 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:16,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:16,354 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T11:48:17,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:17,355 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:18,350 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T11:48:18,355 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:19,351 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:19,356 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T11:48:20,352 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:20,356 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:21,352 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T11:48:21,357 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T11:48:21,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42211 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-15T11:48:21,683 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-15T11:48:21,687 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42211 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-15T11:48:21,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42211 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-15T11:48:21,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42211 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-15T11:48:21,695 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-15T11:48:21,696 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-15T11:48:21,696 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-15T11:48:21,850 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44361 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10 2024-11-15T11:48:21,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7adf9b3d9d04:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731671281518.c51d362e3329d884080dc1c7191a424c. 
2024-11-15T11:48:21,851 INFO [RS_FLUSH_OPERATIONS-regionserver/7adf9b3d9d04:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing c51d362e3329d884080dc1c7191a424c 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-15T11:48:21,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7adf9b3d9d04:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c51d362e3329d884080dc1c7191a424c/.tmp/info/54f53421776340e98965d39ac22fbc4c is 1080, key is row0002/info:/1731671301684/Put/seqid=0 2024-11-15T11:48:21,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46719 is added to blk_1073741838_1014 (size=6033) 2024-11-15T11:48:21,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44127 is added to blk_1073741838_1014 (size=6033) 2024-11-15T11:48:21,865 INFO [RS_FLUSH_OPERATIONS-regionserver/7adf9b3d9d04:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c51d362e3329d884080dc1c7191a424c/.tmp/info/54f53421776340e98965d39ac22fbc4c 2024-11-15T11:48:21,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7adf9b3d9d04:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c51d362e3329d884080dc1c7191a424c/.tmp/info/54f53421776340e98965d39ac22fbc4c as hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c51d362e3329d884080dc1c7191a424c/info/54f53421776340e98965d39ac22fbc4c 2024-11-15T11:48:21,878 INFO [RS_FLUSH_OPERATIONS-regionserver/7adf9b3d9d04:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c51d362e3329d884080dc1c7191a424c/info/54f53421776340e98965d39ac22fbc4c, entries=1, sequenceid=9, filesize=5.9 K 2024-11-15T11:48:21,879 INFO [RS_FLUSH_OPERATIONS-regionserver/7adf9b3d9d04:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for c51d362e3329d884080dc1c7191a424c in 29ms, sequenceid=9, compaction requested=false 2024-11-15T11:48:21,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7adf9b3d9d04:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for c51d362e3329d884080dc1c7191a424c: 2024-11-15T11:48:21,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7adf9b3d9d04:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731671281518.c51d362e3329d884080dc1c7191a424c. 
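[Editor's note, for reference on the flush recorded above: the client request at 11:48:21,687 drives a FlushTableProcedure (pid=9) with a FlushRegionProcedure subprocedure (pid=10), and the region's memstore is written to an HFile under .tmp and committed into info/. A minimal sketch of issuing that same table-level flush through the public Admin API follows; it is illustrative only, assuming a reachable cluster whose hbase-site.xml is on the classpath. The table name is taken from the log, while the class name and connection setup are hypothetical.]

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    // Assumes the test cluster's hbase-site.xml is on the classpath.
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Requests the same table-level flush the jenkins client issued above;
      // the master runs it as a FlushTableProcedure with one FlushRegionProcedure
      // per region, as seen for pid=9/pid=10 in the log.
      admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
    }
  }
}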
2024-11-15T11:48:21,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7adf9b3d9d04:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-11-15T11:48:21,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42211 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-11-15T11:48:21,884 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-15T11:48:21,884 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 185 msec 2024-11-15T11:48:21,886 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 198 msec 2024-11-15T11:48:22,353 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T11:48:22,354 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 after 68061ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor192.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:48:22,357 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:22,358 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta after 68042ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor192.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
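[Editor's note: the WARN entries above show RecoverLeaseFSUtils retrying lease recovery on two WAL files against a DFSClient that appears to have already been shut down, so each reflective call to recoverLease()/isFileClosed() fails with "Filesystem closed". As a rough illustration of the recover-then-poll pattern those traces exercise (not HBase's actual RecoverLeaseFSUtils implementation), a minimal sketch against the public DistributedFileSystem API might look like the following; the path, class name, and retry bounds are assumptions.]

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoverySketch {

  // Ask the NameNode to start lease recovery, then poll isFileClosed() until the
  // file is reported closed -- roughly the loop the WARN entries above keep retrying.
  static boolean recoverLease(DistributedFileSystem dfs, Path wal) throws Exception {
    if (dfs.recoverLease(wal)) {   // true => the file is already closed
      return true;
    }
    for (int attempt = 0; attempt < 10; attempt++) {
      Thread.sleep(1000L);         // the log above shows roughly one attempt per second
      // Both recoverLease() and isFileClosed() throw IOException("Filesystem closed")
      // once the underlying DFSClient has been shut down, which is the cause chain
      // repeated in the entries above.
      if (dfs.isFileClosed(wal)) {
        return true;
      }
    }
    return false;
  }

  public static void main(String[] args) throws Exception {
    // Hypothetical WAL path; the paths in the log are specific to this test run.
    Path wal = new Path("hdfs://localhost:40139/user/jenkins/example-wal");
    FileSystem fs = FileSystem.get(wal.toUri(), new Configuration());
    if (fs instanceof DistributedFileSystem) {
      boolean closed = recoverLease((DistributedFileSystem) fs, wal);
      System.out.println("lease recovered: " + closed);
    }
  }
}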
2024-11-15T11:48:23,354 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:23,358 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:24,355 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T11:48:24,359 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:25,355 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:25,359 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T11:48:26,356 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:26,360 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:27,357 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T11:48:27,360 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:28,358 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:28,361 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T11:48:29,358 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:29,361 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:30,202 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-15T11:48:30,359 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:30,362 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:31,360 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:31,362 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:31,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42211 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-15T11:48:31,762 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-15T11:48:31,765 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7adf9b3d9d04%2C44361%2C1731671280377.1731671311765 2024-11-15T11:48:31,772 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:48:31,772 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:48:31,772 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:48:31,773 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:48:31,773 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:48:31,773 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/WALs/7adf9b3d9d04,44361,1731671280377/7adf9b3d9d04%2C44361%2C1731671280377.1731671281003 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/WALs/7adf9b3d9d04,44361,1731671280377/7adf9b3d9d04%2C44361%2C1731671280377.1731671311765 2024-11-15T11:48:31,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46719 is added to blk_1073741833_1009 (size=5546) 2024-11-15T11:48:31,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44127 is added to blk_1073741833_1009 (size=5546) 2024-11-15T11:48:31,782 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40305:40305),(127.0.0.1/127.0.0.1:42767:42767)] 2024-11-15T11:48:31,783 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42211 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-15T11:48:31,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42211 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-15T11:48:31,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42211 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-15T11:48:31,785 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-15T11:48:31,786 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-15T11:48:31,786 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-15T11:48:31,939 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44361 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12 2024-11-15T11:48:31,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7adf9b3d9d04:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731671281518.c51d362e3329d884080dc1c7191a424c. 2024-11-15T11:48:31,940 INFO [RS_FLUSH_OPERATIONS-regionserver/7adf9b3d9d04:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing c51d362e3329d884080dc1c7191a424c 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-15T11:48:31,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7adf9b3d9d04:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c51d362e3329d884080dc1c7191a424c/.tmp/info/94e0189733204e9d9cd309a95434e04b is 1080, key is row0003/info:/1731671311763/Put/seqid=0 2024-11-15T11:48:31,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44127 is added to blk_1073741840_1016 (size=6033) 2024-11-15T11:48:31,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46719 is added to blk_1073741840_1016 (size=6033) 2024-11-15T11:48:31,954 INFO [RS_FLUSH_OPERATIONS-regionserver/7adf9b3d9d04:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c51d362e3329d884080dc1c7191a424c/.tmp/info/94e0189733204e9d9cd309a95434e04b 2024-11-15T11:48:31,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7adf9b3d9d04:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c51d362e3329d884080dc1c7191a424c/.tmp/info/94e0189733204e9d9cd309a95434e04b as hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c51d362e3329d884080dc1c7191a424c/info/94e0189733204e9d9cd309a95434e04b 2024-11-15T11:48:31,967 INFO [RS_FLUSH_OPERATIONS-regionserver/7adf9b3d9d04:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c51d362e3329d884080dc1c7191a424c/info/94e0189733204e9d9cd309a95434e04b, entries=1, sequenceid=13, filesize=5.9 K 2024-11-15T11:48:31,968 INFO 
[RS_FLUSH_OPERATIONS-regionserver/7adf9b3d9d04:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for c51d362e3329d884080dc1c7191a424c in 29ms, sequenceid=13, compaction requested=true 2024-11-15T11:48:31,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7adf9b3d9d04:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for c51d362e3329d884080dc1c7191a424c: 2024-11-15T11:48:31,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7adf9b3d9d04:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731671281518.c51d362e3329d884080dc1c7191a424c. 2024-11-15T11:48:31,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7adf9b3d9d04:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12 2024-11-15T11:48:31,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42211 {}] master.HMaster(4169): Remote procedure done, pid=12 2024-11-15T11:48:31,973 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-15T11:48:31,973 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 185 msec 2024-11-15T11:48:31,976 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 191 msec 2024-11-15T11:48:32,361 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:32,363 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:33,361 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:33,363 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:34,362 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:34,364 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:35,363 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:35,364 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:36,364 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:36,365 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:37,365 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:37,366 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:38,365 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:38,366 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:39,366 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:39,367 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:40,367 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:40,368 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:41,368 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:41,368 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:41,480 INFO [master/7adf9b3d9d04:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-15T11:48:41,480 INFO [master/7adf9b3d9d04:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-11-15T11:48:41,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42211 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-15T11:48:41,881 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-15T11:48:41,882 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-15T11:48:41,883 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-15T11:48:41,883 DEBUG [Time-limited test {}] regionserver.HStore(1541): c51d362e3329d884080dc1c7191a424c/info is initiating minor compaction (all files)
2024-11-15T11:48:41,883 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-15T11:48:41,883 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-11-15T11:48:41,883 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of c51d362e3329d884080dc1c7191a424c/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731671281518.c51d362e3329d884080dc1c7191a424c.
2024-11-15T11:48:41,883 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c51d362e3329d884080dc1c7191a424c/info/f4ebdf4da8584e72b2119c14de1d1609, hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c51d362e3329d884080dc1c7191a424c/info/54f53421776340e98965d39ac22fbc4c, hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c51d362e3329d884080dc1c7191a424c/info/94e0189733204e9d9cd309a95434e04b] into tmpdir=hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c51d362e3329d884080dc1c7191a424c/.tmp, totalSize=17.7 K
2024-11-15T11:48:41,884 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting f4ebdf4da8584e72b2119c14de1d1609, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1731671291591
2024-11-15T11:48:41,884 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 54f53421776340e98965d39ac22fbc4c, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1731671301684
2024-11-15T11:48:41,885 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 94e0189733204e9d9cd309a95434e04b, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1731671311763
2024-11-15T11:48:41,898 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): c51d362e3329d884080dc1c7191a424c#info#compaction#46 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-15T11:48:41,899 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c51d362e3329d884080dc1c7191a424c/.tmp/info/caf2482892b548fd87299bd4e33fc2b2 is 1080, key is row0001/info:/1731671291591/Put/seqid=0
2024-11-15T11:48:41,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44127 is added to blk_1073741841_1017 (size=8296)
2024-11-15T11:48:41,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46719 is added to blk_1073741841_1017 (size=8296)
2024-11-15T11:48:41,910 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c51d362e3329d884080dc1c7191a424c/.tmp/info/caf2482892b548fd87299bd4e33fc2b2 as hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c51d362e3329d884080dc1c7191a424c/info/caf2482892b548fd87299bd4e33fc2b2
2024-11-15T11:48:41,918 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c51d362e3329d884080dc1c7191a424c/info of c51d362e3329d884080dc1c7191a424c into caf2482892b548fd87299bd4e33fc2b2(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-15T11:48:41,918 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for c51d362e3329d884080dc1c7191a424c:
2024-11-15T11:48:41,921 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7adf9b3d9d04%2C44361%2C1731671280377.1731671321921
2024-11-15T11:48:41,931 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-15T11:48:41,931 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-15T11:48:41,931 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-15T11:48:41,932 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-15T11:48:41,932 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-15T11:48:41,932 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/WALs/7adf9b3d9d04,44361,1731671280377/7adf9b3d9d04%2C44361%2C1731671280377.1731671311765 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/WALs/7adf9b3d9d04,44361,1731671280377/7adf9b3d9d04%2C44361%2C1731671280377.1731671321921
2024-11-15T11:48:41,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46719 is added to blk_1073741839_1015 (size=2520)
2024-11-15T11:48:41,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44127 is added to blk_1073741839_1015 (size=2520)
2024-11-15T11:48:41,935 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/WALs/7adf9b3d9d04,44361,1731671280377/7adf9b3d9d04%2C44361%2C1731671280377.1731671281003 to hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/oldWALs/7adf9b3d9d04%2C44361%2C1731671280377.1731671281003
2024-11-15T11:48:41,939 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40305:40305),(127.0.0.1/127.0.0.1:42767:42767)]
2024-11-15T11:48:41,940 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42211 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-15T11:48:41,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42211 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-15T11:48:41,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42211 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-11-15T11:48:41,942 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-15T11:48:41,943 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-15T11:48:41,943 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-15T11:48:42,097 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44361 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14
2024-11-15T11:48:42,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7adf9b3d9d04:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731671281518.c51d362e3329d884080dc1c7191a424c.
2024-11-15T11:48:42,097 INFO [RS_FLUSH_OPERATIONS-regionserver/7adf9b3d9d04:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing c51d362e3329d884080dc1c7191a424c 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-15T11:48:42,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7adf9b3d9d04:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c51d362e3329d884080dc1c7191a424c/.tmp/info/929be46072374ae9bbf2114a3d05da23 is 1080, key is row0000/info:/1731671321919/Put/seqid=0
2024-11-15T11:48:42,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46719 is added to blk_1073741843_1019 (size=6033)
2024-11-15T11:48:42,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44127 is added to blk_1073741843_1019 (size=6033)
2024-11-15T11:48:42,111 INFO [RS_FLUSH_OPERATIONS-regionserver/7adf9b3d9d04:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c51d362e3329d884080dc1c7191a424c/.tmp/info/929be46072374ae9bbf2114a3d05da23
2024-11-15T11:48:42,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7adf9b3d9d04:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c51d362e3329d884080dc1c7191a424c/.tmp/info/929be46072374ae9bbf2114a3d05da23 as hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c51d362e3329d884080dc1c7191a424c/info/929be46072374ae9bbf2114a3d05da23
2024-11-15T11:48:42,125 INFO [RS_FLUSH_OPERATIONS-regionserver/7adf9b3d9d04:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c51d362e3329d884080dc1c7191a424c/info/929be46072374ae9bbf2114a3d05da23, entries=1, sequenceid=18, filesize=5.9 K
2024-11-15T11:48:42,126 INFO [RS_FLUSH_OPERATIONS-regionserver/7adf9b3d9d04:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for c51d362e3329d884080dc1c7191a424c in 29ms, sequenceid=18, compaction requested=false
2024-11-15T11:48:42,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7adf9b3d9d04:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for c51d362e3329d884080dc1c7191a424c:
2024-11-15T11:48:42,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7adf9b3d9d04:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731671281518.c51d362e3329d884080dc1c7191a424c.
2024-11-15T11:48:42,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7adf9b3d9d04:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-15T11:48:42,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42211 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-15T11:48:42,132 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-15T11:48:42,132 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 186 msec 2024-11-15T11:48:42,136 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 194 msec 2024-11-15T11:48:42,369 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T11:48:42,369 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:43,369 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:43,369 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T11:48:44,370 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:44,370 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:45,371 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T11:48:45,371 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:46,372 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:46,372 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T11:48:46,867 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region c51d362e3329d884080dc1c7191a424c, had cached 0 bytes from a total of 14329 2024-11-15T11:48:47,372 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:47,372 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:48,373 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:48,373 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:49,374 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:49,374 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:50,375 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:50,375 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:51,376 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:51,376 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T11:48:52,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42211 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-15T11:48:52,002 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-15T11:48:52,005 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7adf9b3d9d04%2C44361%2C1731671280377.1731671332005 2024-11-15T11:48:52,011 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:48:52,012 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:48:52,012 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:48:52,012 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:48:52,012 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:48:52,012 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/WALs/7adf9b3d9d04,44361,1731671280377/7adf9b3d9d04%2C44361%2C1731671280377.1731671321921 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/WALs/7adf9b3d9d04,44361,1731671280377/7adf9b3d9d04%2C44361%2C1731671280377.1731671332005 2024-11-15T11:48:52,013 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42767:42767),(127.0.0.1/127.0.0.1:40305:40305)] 2024-11-15T11:48:52,013 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/WALs/7adf9b3d9d04,44361,1731671280377/7adf9b3d9d04%2C44361%2C1731671280377.1731671321921 is not closed yet, will try archiving it next time 2024-11-15T11:48:52,013 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/WALs/7adf9b3d9d04,44361,1731671280377/7adf9b3d9d04%2C44361%2C1731671280377.1731671311765 to hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/oldWALs/7adf9b3d9d04%2C44361%2C1731671280377.1731671311765 2024-11-15T11:48:52,013 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-15T11:48:52,013 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-15T11:48:52,013 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T11:48:52,014 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:48:52,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46719 is added to blk_1073741842_1018 (size=2026) 2024-11-15T11:48:52,014 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:48:52,014 INFO [Registry-endpoints-refresh-end-points {}] 
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T11:48:52,014 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-15T11:48:52,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44127 is added to blk_1073741842_1018 (size=2026) 2024-11-15T11:48:52,015 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=918852881, stopped=false 2024-11-15T11:48:52,015 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=7adf9b3d9d04,42211,1731671280222 2024-11-15T11:48:52,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44361-0x1013f9c5d120001, quorum=127.0.0.1:62622, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T11:48:52,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42211-0x1013f9c5d120000, quorum=127.0.0.1:62622, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T11:48:52,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44361-0x1013f9c5d120001, quorum=127.0.0.1:62622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:48:52,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42211-0x1013f9c5d120000, quorum=127.0.0.1:62622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:48:52,050 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T11:48:52,051 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-15T11:48:52,051 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T11:48:52,051 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:48:52,051 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:44361-0x1013f9c5d120001, quorum=127.0.0.1:62622, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T11:48:52,051 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42211-0x1013f9c5d120000, quorum=127.0.0.1:62622, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T11:48:52,051 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '7adf9b3d9d04,44361,1731671280377' ***** 2024-11-15T11:48:52,051 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-15T11:48:52,052 INFO [RS:0;7adf9b3d9d04:44361 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-15T11:48:52,052 INFO [RS:0;7adf9b3d9d04:44361 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-15T11:48:52,052 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-15T11:48:52,052 INFO [RS:0;7adf9b3d9d04:44361 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-15T11:48:52,052 INFO [RS:0;7adf9b3d9d04:44361 {}] regionserver.HRegionServer(3091): Received CLOSE for c51d362e3329d884080dc1c7191a424c 2024-11-15T11:48:52,052 INFO [RS:0;7adf9b3d9d04:44361 {}] regionserver.HRegionServer(959): stopping server 7adf9b3d9d04,44361,1731671280377 2024-11-15T11:48:52,052 INFO [RS:0;7adf9b3d9d04:44361 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T11:48:52,052 INFO [RS:0;7adf9b3d9d04:44361 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;7adf9b3d9d04:44361. 2024-11-15T11:48:52,052 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing c51d362e3329d884080dc1c7191a424c, disabling compactions & flushes 2024-11-15T11:48:52,052 DEBUG [RS:0;7adf9b3d9d04:44361 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T11:48:52,052 DEBUG [RS:0;7adf9b3d9d04:44361 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:48:52,052 INFO [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731671281518.c51d362e3329d884080dc1c7191a424c. 2024-11-15T11:48:52,053 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731671281518.c51d362e3329d884080dc1c7191a424c. 2024-11-15T11:48:52,053 INFO [RS:0;7adf9b3d9d04:44361 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-15T11:48:52,053 INFO [RS:0;7adf9b3d9d04:44361 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-15T11:48:52,053 INFO [RS:0;7adf9b3d9d04:44361 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-15T11:48:52,053 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731671281518.c51d362e3329d884080dc1c7191a424c. after waiting 0 ms 2024-11-15T11:48:52,053 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731671281518.c51d362e3329d884080dc1c7191a424c. 2024-11-15T11:48:52,053 INFO [RS:0;7adf9b3d9d04:44361 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-15T11:48:52,053 INFO [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing c51d362e3329d884080dc1c7191a424c 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-15T11:48:52,053 INFO [RS:0;7adf9b3d9d04:44361 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-15T11:48:52,053 DEBUG [RS:0;7adf9b3d9d04:44361 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, c51d362e3329d884080dc1c7191a424c=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731671281518.c51d362e3329d884080dc1c7191a424c.} 2024-11-15T11:48:52,053 DEBUG [RS:0;7adf9b3d9d04:44361 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, c51d362e3329d884080dc1c7191a424c 2024-11-15T11:48:52,053 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T11:48:52,053 INFO [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T11:48:52,053 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T11:48:52,053 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T11:48:52,053 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T11:48:52,053 INFO [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-15T11:48:52,059 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 
{event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c51d362e3329d884080dc1c7191a424c/.tmp/info/cf620a821b5445159ece481150369c64 is 1080, key is row0001/info:/1731671332003/Put/seqid=0 2024-11-15T11:48:52,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46719 is added to blk_1073741845_1021 (size=6033) 2024-11-15T11:48:52,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44127 is added to blk_1073741845_1021 (size=6033) 2024-11-15T11:48:52,064 INFO [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c51d362e3329d884080dc1c7191a424c/.tmp/info/cf620a821b5445159ece481150369c64 2024-11-15T11:48:52,070 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c51d362e3329d884080dc1c7191a424c/.tmp/info/cf620a821b5445159ece481150369c64 as hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c51d362e3329d884080dc1c7191a424c/info/cf620a821b5445159ece481150369c64 2024-11-15T11:48:52,073 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/hbase/meta/1588230740/.tmp/info/44a21ef3e3d34a3abf85dcfc0fd64339 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731671281518.c51d362e3329d884080dc1c7191a424c./info:regioninfo/1731671281886/Put/seqid=0 2024-11-15T11:48:52,077 INFO [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c51d362e3329d884080dc1c7191a424c/info/cf620a821b5445159ece481150369c64, entries=1, sequenceid=22, filesize=5.9 K 2024-11-15T11:48:52,078 INFO [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for c51d362e3329d884080dc1c7191a424c in 25ms, sequenceid=22, compaction requested=true 2024-11-15T11:48:52,079 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731671281518.c51d362e3329d884080dc1c7191a424c.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c51d362e3329d884080dc1c7191a424c/info/f4ebdf4da8584e72b2119c14de1d1609, 
hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c51d362e3329d884080dc1c7191a424c/info/54f53421776340e98965d39ac22fbc4c, hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c51d362e3329d884080dc1c7191a424c/info/94e0189733204e9d9cd309a95434e04b] to archive 2024-11-15T11:48:52,081 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731671281518.c51d362e3329d884080dc1c7191a424c.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-15T11:48:52,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44127 is added to blk_1073741846_1022 (size=7308) 2024-11-15T11:48:52,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46719 is added to blk_1073741846_1022 (size=7308) 2024-11-15T11:48:52,083 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731671281518.c51d362e3329d884080dc1c7191a424c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c51d362e3329d884080dc1c7191a424c/info/f4ebdf4da8584e72b2119c14de1d1609 to hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c51d362e3329d884080dc1c7191a424c/info/f4ebdf4da8584e72b2119c14de1d1609 2024-11-15T11:48:52,083 INFO [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/hbase/meta/1588230740/.tmp/info/44a21ef3e3d34a3abf85dcfc0fd64339 2024-11-15T11:48:52,085 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731671281518.c51d362e3329d884080dc1c7191a424c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c51d362e3329d884080dc1c7191a424c/info/54f53421776340e98965d39ac22fbc4c to hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c51d362e3329d884080dc1c7191a424c/info/54f53421776340e98965d39ac22fbc4c 2024-11-15T11:48:52,086 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731671281518.c51d362e3329d884080dc1c7191a424c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c51d362e3329d884080dc1c7191a424c/info/94e0189733204e9d9cd309a95434e04b to hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c51d362e3329d884080dc1c7191a424c/info/94e0189733204e9d9cd309a95434e04b 2024-11-15T11:48:52,086 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731671281518.c51d362e3329d884080dc1c7191a424c.-1 {}] 
regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=7adf9b3d9d04:42211 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-11-15T11:48:52,087 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731671281518.c51d362e3329d884080dc1c7191a424c.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [f4ebdf4da8584e72b2119c14de1d1609=6033, 54f53421776340e98965d39ac22fbc4c=6033, 94e0189733204e9d9cd309a95434e04b=6033] 2024-11-15T11:48:52,091 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c51d362e3329d884080dc1c7191a424c/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-15T11:48:52,092 INFO [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731671281518.c51d362e3329d884080dc1c7191a424c. 2024-11-15T11:48:52,092 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for c51d362e3329d884080dc1c7191a424c: Waiting for close lock at 1731671332052Running coprocessor pre-close hooks at 1731671332052Disabling compacts and flushes for region at 1731671332052Disabling writes for close at 1731671332053 (+1 ms)Obtaining lock to block concurrent updates at 1731671332053Preparing flush snapshotting stores in c51d362e3329d884080dc1c7191a424c at 1731671332053Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731671281518.c51d362e3329d884080dc1c7191a424c., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1731671332053Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731671281518.c51d362e3329d884080dc1c7191a424c. at 1731671332054 (+1 ms)Flushing c51d362e3329d884080dc1c7191a424c/info: creating writer at 1731671332054Flushing c51d362e3329d884080dc1c7191a424c/info: appending metadata at 1731671332058 (+4 ms)Flushing c51d362e3329d884080dc1c7191a424c/info: closing flushed file at 1731671332058Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@47a2cb6c: reopening flushed file at 1731671332069 (+11 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for c51d362e3329d884080dc1c7191a424c in 25ms, sequenceid=22, compaction requested=true at 1731671332078 (+9 ms)Writing region close event to WAL at 1731671332087 (+9 ms)Running coprocessor post-close hooks at 1731671332091 (+4 ms)Closed at 1731671332092 (+1 ms) 2024-11-15T11:48:52,092 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731671281518.c51d362e3329d884080dc1c7191a424c. 
2024-11-15T11:48:52,105 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/hbase/meta/1588230740/.tmp/ns/2c85fcfe8b57422b943054815a5b54a8 is 43, key is default/ns:d/1731671281460/Put/seqid=0 2024-11-15T11:48:52,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44127 is added to blk_1073741847_1023 (size=5153) 2024-11-15T11:48:52,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46719 is added to blk_1073741847_1023 (size=5153) 2024-11-15T11:48:52,113 INFO [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/hbase/meta/1588230740/.tmp/ns/2c85fcfe8b57422b943054815a5b54a8 2024-11-15T11:48:52,133 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/hbase/meta/1588230740/.tmp/table/709ee79e325f499bbaf22a7b909fe794 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1731671281898/Put/seqid=0 2024-11-15T11:48:52,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44127 is added to blk_1073741848_1024 (size=5508) 2024-11-15T11:48:52,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46719 is added to blk_1073741848_1024 (size=5508) 2024-11-15T11:48:52,140 INFO [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/hbase/meta/1588230740/.tmp/table/709ee79e325f499bbaf22a7b909fe794 2024-11-15T11:48:52,146 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/hbase/meta/1588230740/.tmp/info/44a21ef3e3d34a3abf85dcfc0fd64339 as hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/hbase/meta/1588230740/info/44a21ef3e3d34a3abf85dcfc0fd64339 2024-11-15T11:48:52,151 INFO [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/hbase/meta/1588230740/info/44a21ef3e3d34a3abf85dcfc0fd64339, entries=10, sequenceid=11, filesize=7.1 K 2024-11-15T11:48:52,152 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/hbase/meta/1588230740/.tmp/ns/2c85fcfe8b57422b943054815a5b54a8 as hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/hbase/meta/1588230740/ns/2c85fcfe8b57422b943054815a5b54a8 2024-11-15T11:48:52,158 INFO [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/hbase/meta/1588230740/ns/2c85fcfe8b57422b943054815a5b54a8, entries=2, sequenceid=11, filesize=5.0 K 2024-11-15T11:48:52,159 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/hbase/meta/1588230740/.tmp/table/709ee79e325f499bbaf22a7b909fe794 as hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/hbase/meta/1588230740/table/709ee79e325f499bbaf22a7b909fe794 2024-11-15T11:48:52,165 INFO [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/hbase/meta/1588230740/table/709ee79e325f499bbaf22a7b909fe794, entries=2, sequenceid=11, filesize=5.4 K 2024-11-15T11:48:52,166 INFO [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 113ms, sequenceid=11, compaction requested=false 2024-11-15T11:48:52,171 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-15T11:48:52,171 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T11:48:52,171 INFO [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T11:48:52,171 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731671332053Running coprocessor pre-close hooks at 1731671332053Disabling compacts and flushes for region at 1731671332053Disabling writes for close at 1731671332053Obtaining lock to block concurrent updates at 1731671332053Preparing flush snapshotting stores in 1588230740 at 1731671332053Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1731671332054 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731671332054Flushing 1588230740/info: creating writer at 1731671332054Flushing 1588230740/info: appending metadata at 1731671332073 (+19 ms)Flushing 1588230740/info: closing flushed file at 1731671332073Flushing 1588230740/ns: creating writer at 1731671332090 (+17 ms)Flushing 1588230740/ns: appending metadata at 1731671332105 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1731671332105Flushing 1588230740/table: creating writer at 1731671332117 (+12 ms)Flushing 1588230740/table: appending metadata at 1731671332133 (+16 ms)Flushing 1588230740/table: closing flushed file at 1731671332133Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@494670f0: reopening flushed file at 1731671332145 (+12 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5253def3: reopening flushed file at 1731671332152 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5d357d80: reopening flushed file at 1731671332158 (+6 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 113ms, sequenceid=11, compaction requested=false at 1731671332166 (+8 ms)Writing region close event to WAL at 1731671332167 (+1 ms)Running coprocessor post-close hooks at 1731671332171 (+4 ms)Closed at 1731671332171 2024-11-15T11:48:52,172 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-15T11:48:52,253 INFO [RS:0;7adf9b3d9d04:44361 {}] regionserver.HRegionServer(976): stopping server 7adf9b3d9d04,44361,1731671280377; all regions closed. 2024-11-15T11:48:52,254 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:48:52,254 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:48:52,254 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:48:52,254 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:48:52,254 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:48:52,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46719 is added to blk_1073741834_1010 (size=3306) 2024-11-15T11:48:52,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44127 is added to blk_1073741834_1010 (size=3306) 2024-11-15T11:48:52,259 DEBUG [RS:0;7adf9b3d9d04:44361 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/oldWALs 2024-11-15T11:48:52,259 INFO [RS:0;7adf9b3d9d04:44361 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7adf9b3d9d04%2C44361%2C1731671280377.meta:.meta(num 1731671281373) 2024-11-15T11:48:52,259 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:48:52,259 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:48:52,259 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:48:52,259 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:48:52,260 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:48:52,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44127 is added to blk_1073741844_1020 (size=1252) 2024-11-15T11:48:52,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46719 is added to blk_1073741844_1020 (size=1252) 2024-11-15T11:48:52,265 DEBUG [RS:0;7adf9b3d9d04:44361 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/oldWALs 2024-11-15T11:48:52,265 INFO [RS:0;7adf9b3d9d04:44361 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7adf9b3d9d04%2C44361%2C1731671280377:(num 1731671332005) 2024-11-15T11:48:52,265 DEBUG [RS:0;7adf9b3d9d04:44361 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:48:52,265 INFO [RS:0;7adf9b3d9d04:44361 {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T11:48:52,265 INFO [RS:0;7adf9b3d9d04:44361 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T11:48:52,265 INFO [RS:0;7adf9b3d9d04:44361 {}] hbase.ChoreService(370): Chore service for: regionserver/7adf9b3d9d04:0 had 
[ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-15T11:48:52,265 INFO [RS:0;7adf9b3d9d04:44361 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T11:48:52,265 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-15T11:48:52,266 INFO [RS:0;7adf9b3d9d04:44361 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44361 2024-11-15T11:48:52,292 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44361-0x1013f9c5d120001, quorum=127.0.0.1:62622, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7adf9b3d9d04,44361,1731671280377 2024-11-15T11:48:52,292 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42211-0x1013f9c5d120000, quorum=127.0.0.1:62622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T11:48:52,292 INFO [RS:0;7adf9b3d9d04:44361 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T11:48:52,300 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7adf9b3d9d04,44361,1731671280377] 2024-11-15T11:48:52,308 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/7adf9b3d9d04,44361,1731671280377 already deleted, retry=false 2024-11-15T11:48:52,309 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 7adf9b3d9d04,44361,1731671280377 expired; onlineServers=0 2024-11-15T11:48:52,309 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '7adf9b3d9d04,42211,1731671280222' ***** 2024-11-15T11:48:52,309 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-15T11:48:52,309 INFO [M:0;7adf9b3d9d04:42211 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T11:48:52,309 INFO [M:0;7adf9b3d9d04:42211 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T11:48:52,309 DEBUG [M:0;7adf9b3d9d04:42211 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-15T11:48:52,309 DEBUG [M:0;7adf9b3d9d04:42211 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-15T11:48:52,309 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-15T11:48:52,309 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster-HFileCleaner.small.0-1731671280710 {}] cleaner.HFileCleaner(306): Exit Thread[master/7adf9b3d9d04:0:becomeActiveMaster-HFileCleaner.small.0-1731671280710,5,FailOnTimeoutGroup] 2024-11-15T11:48:52,309 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster-HFileCleaner.large.0-1731671280710 {}] cleaner.HFileCleaner(306): Exit Thread[master/7adf9b3d9d04:0:becomeActiveMaster-HFileCleaner.large.0-1731671280710,5,FailOnTimeoutGroup] 2024-11-15T11:48:52,309 INFO [M:0;7adf9b3d9d04:42211 {}] hbase.ChoreService(370): Chore service for: master/7adf9b3d9d04:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-15T11:48:52,309 INFO [M:0;7adf9b3d9d04:42211 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T11:48:52,310 DEBUG [M:0;7adf9b3d9d04:42211 {}] master.HMaster(1795): Stopping service threads 2024-11-15T11:48:52,310 INFO [M:0;7adf9b3d9d04:42211 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-15T11:48:52,310 INFO [M:0;7adf9b3d9d04:42211 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T11:48:52,310 INFO [M:0;7adf9b3d9d04:42211 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-15T11:48:52,310 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-15T11:48:52,317 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42211-0x1013f9c5d120000, quorum=127.0.0.1:62622, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-15T11:48:52,317 DEBUG [M:0;7adf9b3d9d04:42211 {}] zookeeper.ZKUtil(347): master:42211-0x1013f9c5d120000, quorum=127.0.0.1:62622, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-15T11:48:52,317 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42211-0x1013f9c5d120000, quorum=127.0.0.1:62622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:48:52,317 WARN [M:0;7adf9b3d9d04:42211 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-15T11:48:52,318 INFO [M:0;7adf9b3d9d04:42211 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/.lastflushedseqids 2024-11-15T11:48:52,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44127 is added to blk_1073741849_1025 (size=130) 2024-11-15T11:48:52,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46719 is added to blk_1073741849_1025 (size=130) 2024-11-15T11:48:52,325 INFO [M:0;7adf9b3d9d04:42211 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-15T11:48:52,325 INFO [M:0;7adf9b3d9d04:42211 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-15T11:48:52,325 DEBUG [M:0;7adf9b3d9d04:42211 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T11:48:52,325 INFO [M:0;7adf9b3d9d04:42211 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:48:52,325 DEBUG [M:0;7adf9b3d9d04:42211 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:48:52,325 DEBUG [M:0;7adf9b3d9d04:42211 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T11:48:52,325 DEBUG [M:0;7adf9b3d9d04:42211 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:48:52,325 INFO [M:0;7adf9b3d9d04:42211 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.54 KB heapSize=54.91 KB 2024-11-15T11:48:52,342 DEBUG [M:0;7adf9b3d9d04:42211 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b549ba99993646c0ba1cd35e2fbe5091 is 82, key is hbase:meta,,1/info:regioninfo/1731671281408/Put/seqid=0 2024-11-15T11:48:52,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44127 is added to blk_1073741850_1026 (size=5672) 2024-11-15T11:48:52,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46719 is added to blk_1073741850_1026 (size=5672) 2024-11-15T11:48:52,346 INFO [M:0;7adf9b3d9d04:42211 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b549ba99993646c0ba1cd35e2fbe5091 2024-11-15T11:48:52,367 DEBUG [M:0;7adf9b3d9d04:42211 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e4a4936982a04a99a2b6eadce6660a71 is 797, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731671281903/Put/seqid=0 2024-11-15T11:48:52,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44127 is added to blk_1073741851_1027 (size=7818) 2024-11-15T11:48:52,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46719 is added to blk_1073741851_1027 (size=7818) 2024-11-15T11:48:52,372 INFO [M:0;7adf9b3d9d04:42211 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.94 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e4a4936982a04a99a2b6eadce6660a71 2024-11-15T11:48:52,376 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:52,376 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:52,377 INFO [M:0;7adf9b3d9d04:42211 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for e4a4936982a04a99a2b6eadce6660a71 2024-11-15T11:48:52,393 DEBUG [M:0;7adf9b3d9d04:42211 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8a8c48d2df1646f88d7b84fea595725c is 69, key is 7adf9b3d9d04,44361,1731671280377/rs:state/1731671280844/Put/seqid=0 2024-11-15T11:48:52,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46719 is added to blk_1073741852_1028 (size=5156) 2024-11-15T11:48:52,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44127 is added to blk_1073741852_1028 (size=5156) 2024-11-15T11:48:52,398 INFO [M:0;7adf9b3d9d04:42211 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8a8c48d2df1646f88d7b84fea595725c 2024-11-15T11:48:52,400 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44361-0x1013f9c5d120001, quorum=127.0.0.1:62622, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T11:48:52,400 INFO [RS:0;7adf9b3d9d04:44361 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T11:48:52,400 INFO [RS:0;7adf9b3d9d04:44361 {}] regionserver.HRegionServer(1031): Exiting; stopping=7adf9b3d9d04,44361,1731671280377; zookeeper connection closed. 
2024-11-15T11:48:52,400 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44361-0x1013f9c5d120001, quorum=127.0.0.1:62622, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T11:48:52,401 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@346ca866 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@346ca866 2024-11-15T11:48:52,401 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-15T11:48:52,419 DEBUG [M:0;7adf9b3d9d04:42211 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0b7bb75fad094e96874b810c91471276 is 52, key is load_balancer_on/state:d/1731671281514/Put/seqid=0 2024-11-15T11:48:52,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46719 is added to blk_1073741853_1029 (size=5056) 2024-11-15T11:48:52,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44127 is added to blk_1073741853_1029 (size=5056) 2024-11-15T11:48:52,424 INFO [M:0;7adf9b3d9d04:42211 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0b7bb75fad094e96874b810c91471276 2024-11-15T11:48:52,429 DEBUG [M:0;7adf9b3d9d04:42211 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b549ba99993646c0ba1cd35e2fbe5091 as hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b549ba99993646c0ba1cd35e2fbe5091 2024-11-15T11:48:52,434 INFO [M:0;7adf9b3d9d04:42211 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b549ba99993646c0ba1cd35e2fbe5091, entries=8, sequenceid=121, filesize=5.5 K 2024-11-15T11:48:52,435 DEBUG [M:0;7adf9b3d9d04:42211 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e4a4936982a04a99a2b6eadce6660a71 as hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/e4a4936982a04a99a2b6eadce6660a71 2024-11-15T11:48:52,440 INFO [M:0;7adf9b3d9d04:42211 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for e4a4936982a04a99a2b6eadce6660a71 2024-11-15T11:48:52,440 INFO [M:0;7adf9b3d9d04:42211 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/e4a4936982a04a99a2b6eadce6660a71, entries=14, sequenceid=121, filesize=7.6 K 2024-11-15T11:48:52,441 DEBUG [M:0;7adf9b3d9d04:42211 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8a8c48d2df1646f88d7b84fea595725c as hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/8a8c48d2df1646f88d7b84fea595725c 2024-11-15T11:48:52,446 INFO [M:0;7adf9b3d9d04:42211 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/8a8c48d2df1646f88d7b84fea595725c, entries=1, sequenceid=121, filesize=5.0 K 2024-11-15T11:48:52,447 DEBUG [M:0;7adf9b3d9d04:42211 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0b7bb75fad094e96874b810c91471276 as hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/0b7bb75fad094e96874b810c91471276 2024-11-15T11:48:52,452 INFO [M:0;7adf9b3d9d04:42211 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38977/user/jenkins/test-data/bd01edc6-7bc0-2dd0-bd9f-008f525ad52a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/0b7bb75fad094e96874b810c91471276, entries=1, sequenceid=121, filesize=4.9 K 2024-11-15T11:48:52,453 INFO [M:0;7adf9b3d9d04:42211 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.54 KB/44590, heapSize ~54.85 KB/56168, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 128ms, sequenceid=121, compaction requested=false 2024-11-15T11:48:52,455 INFO [M:0;7adf9b3d9d04:42211 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:48:52,455 DEBUG [M:0;7adf9b3d9d04:42211 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731671332325Disabling compacts and flushes for region at 1731671332325Disabling writes for close at 1731671332325Obtaining lock to block concurrent updates at 1731671332325Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731671332325Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44590, getHeapSize=56168, getOffHeapSize=0, getCellsCount=140 at 1731671332326 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731671332326Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731671332326Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731671332341 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731671332341Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731671332351 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731671332366 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731671332366Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731671332377 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731671332392 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731671332393 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731671332403 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731671332418 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731671332418Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7e4501bf: reopening flushed file at 1731671332428 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@37e44b68: reopening flushed file at 1731671332434 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4a20c828: reopening flushed file at 1731671332441 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1b21be93: reopening flushed file at 1731671332447 (+6 ms)Finished flush of dataSize ~43.54 KB/44590, heapSize ~54.85 KB/56168, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 128ms, sequenceid=121, compaction requested=false at 1731671332454 (+7 ms)Writing region close event to WAL at 1731671332455 (+1 ms)Closed at 1731671332455 2024-11-15T11:48:52,455 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:48:52,455 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:48:52,455 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:48:52,456 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:48:52,456 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:48:52,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46719 is added to blk_1073741830_1006 (size=52987) 2024-11-15T11:48:52,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44127 is added to blk_1073741830_1006 (size=52987) 2024-11-15T11:48:52,458 INFO [M:0;7adf9b3d9d04:42211 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-15T11:48:52,458 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-15T11:48:52,458 INFO [M:0;7adf9b3d9d04:42211 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42211 2024-11-15T11:48:52,458 INFO [M:0;7adf9b3d9d04:42211 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T11:48:52,567 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42211-0x1013f9c5d120000, quorum=127.0.0.1:62622, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T11:48:52,567 INFO [M:0;7adf9b3d9d04:42211 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T11:48:52,567 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42211-0x1013f9c5d120000, quorum=127.0.0.1:62622, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T11:48:52,570 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@14b98ef8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T11:48:52,570 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7fc2e521{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T11:48:52,570 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T11:48:52,571 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@65345c29{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T11:48:52,571 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7009eb0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a1fa362b-779e-af5b-136a-ccaedfa8618d/hadoop.log.dir/,STOPPED} 2024-11-15T11:48:52,573 WARN [BP-166489227-172.17.0.2-1731671278544 heartbeating to localhost/127.0.0.1:38977 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T11:48:52,573 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-15T11:48:52,573 WARN [BP-166489227-172.17.0.2-1731671278544 heartbeating to localhost/127.0.0.1:38977 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-166489227-172.17.0.2-1731671278544 (Datanode Uuid 91b76560-df27-4a94-a756-fc127c9aa7ad) service to localhost/127.0.0.1:38977 2024-11-15T11:48:52,573 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T11:48:52,574 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a1fa362b-779e-af5b-136a-ccaedfa8618d/cluster_b6eeed7d-7a31-bb17-da52-edb8570b3770/data/data3/current/BP-166489227-172.17.0.2-1731671278544 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T11:48:52,574 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a1fa362b-779e-af5b-136a-ccaedfa8618d/cluster_b6eeed7d-7a31-bb17-da52-edb8570b3770/data/data4/current/BP-166489227-172.17.0.2-1731671278544 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T11:48:52,575 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T11:48:52,578 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@e4582a5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T11:48:52,578 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@37d74326{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T11:48:52,578 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T11:48:52,578 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@e6bebf5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T11:48:52,578 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@278dab99{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a1fa362b-779e-af5b-136a-ccaedfa8618d/hadoop.log.dir/,STOPPED} 2024-11-15T11:48:52,580 WARN [BP-166489227-172.17.0.2-1731671278544 heartbeating to localhost/127.0.0.1:38977 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T11:48:52,580 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-15T11:48:52,580 WARN [BP-166489227-172.17.0.2-1731671278544 heartbeating to localhost/127.0.0.1:38977 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-166489227-172.17.0.2-1731671278544 (Datanode Uuid ad5c07a1-4fad-4172-a5cb-cf56a648de9d) service to localhost/127.0.0.1:38977 2024-11-15T11:48:52,580 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T11:48:52,581 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a1fa362b-779e-af5b-136a-ccaedfa8618d/cluster_b6eeed7d-7a31-bb17-da52-edb8570b3770/data/data1/current/BP-166489227-172.17.0.2-1731671278544 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T11:48:52,581 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a1fa362b-779e-af5b-136a-ccaedfa8618d/cluster_b6eeed7d-7a31-bb17-da52-edb8570b3770/data/data2/current/BP-166489227-172.17.0.2-1731671278544 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T11:48:52,581 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T11:48:52,586 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@30d9f702{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T11:48:52,587 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6d483d07{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T11:48:52,587 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T11:48:52,587 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@df163d0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T11:48:52,587 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6da95783{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a1fa362b-779e-af5b-136a-ccaedfa8618d/hadoop.log.dir/,STOPPED} 2024-11-15T11:48:52,593 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-15T11:48:52,611 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-15T11:48:52,619 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=205 (was 179) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/7adf9b3d9d04:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:38977 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38977 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38977 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:38977 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:38977 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:38977 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:38977 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38977 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=485 (was 455) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=254 (was 252) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=10337 (was 10514) 2024-11-15T11:48:52,627 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=205, OpenFileDescriptor=485, MaxFileDescriptor=1048576, SystemLoadAverage=254, ProcessCount=11, AvailableMemoryMB=10337 2024-11-15T11:48:52,627 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-15T11:48:52,627 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a1fa362b-779e-af5b-136a-ccaedfa8618d/hadoop.log.dir so I do NOT create it in target/test-data/faa31688-46b6-1f16-abaa-39f06c167d2c 2024-11-15T11:48:52,627 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a1fa362b-779e-af5b-136a-ccaedfa8618d/hadoop.tmp.dir so I do NOT create it in target/test-data/faa31688-46b6-1f16-abaa-39f06c167d2c 2024-11-15T11:48:52,627 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/faa31688-46b6-1f16-abaa-39f06c167d2c/cluster_926d09c0-7fc5-146a-3776-532c837bb4dc, deleteOnExit=true 2024-11-15T11:48:52,627 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-15T11:48:52,627 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/faa31688-46b6-1f16-abaa-39f06c167d2c/test.cache.data in system properties and HBase conf 2024-11-15T11:48:52,627 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/faa31688-46b6-1f16-abaa-39f06c167d2c/hadoop.tmp.dir in system properties and HBase conf 2024-11-15T11:48:52,627 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/faa31688-46b6-1f16-abaa-39f06c167d2c/hadoop.log.dir in system properties and HBase conf 2024-11-15T11:48:52,627 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/faa31688-46b6-1f16-abaa-39f06c167d2c/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-15T11:48:52,628 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/faa31688-46b6-1f16-abaa-39f06c167d2c/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-15T11:48:52,628 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-15T11:48:52,628 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-15T11:48:52,628 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/faa31688-46b6-1f16-abaa-39f06c167d2c/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-15T11:48:52,628 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/faa31688-46b6-1f16-abaa-39f06c167d2c/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-15T11:48:52,628 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/faa31688-46b6-1f16-abaa-39f06c167d2c/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-15T11:48:52,628 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/faa31688-46b6-1f16-abaa-39f06c167d2c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T11:48:52,628 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/faa31688-46b6-1f16-abaa-39f06c167d2c/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-15T11:48:52,628 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/faa31688-46b6-1f16-abaa-39f06c167d2c/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-15T11:48:52,628 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/faa31688-46b6-1f16-abaa-39f06c167d2c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T11:48:52,628 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/faa31688-46b6-1f16-abaa-39f06c167d2c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T11:48:52,628 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/faa31688-46b6-1f16-abaa-39f06c167d2c/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-15T11:48:52,628 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/faa31688-46b6-1f16-abaa-39f06c167d2c/nfs.dump.dir in system properties and HBase conf 2024-11-15T11:48:52,629 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/faa31688-46b6-1f16-abaa-39f06c167d2c/java.io.tmpdir in system properties and HBase conf 2024-11-15T11:48:52,629 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/faa31688-46b6-1f16-abaa-39f06c167d2c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T11:48:52,629 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/faa31688-46b6-1f16-abaa-39f06c167d2c/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-15T11:48:52,629 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/faa31688-46b6-1f16-abaa-39f06c167d2c/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-15T11:48:52,641 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-15T11:48:52,869 INFO [regionserver/7adf9b3d9d04:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T11:48:52,892 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T11:48:52,896 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T11:48:52,898 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T11:48:52,898 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T11:48:52,898 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T11:48:52,898 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T11:48:52,899 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@671b15e6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/faa31688-46b6-1f16-abaa-39f06c167d2c/hadoop.log.dir/,AVAILABLE} 2024-11-15T11:48:52,899 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@54fcac{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T11:48:53,002 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@64d2170c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/faa31688-46b6-1f16-abaa-39f06c167d2c/java.io.tmpdir/jetty-localhost-45757-hadoop-hdfs-3_4_1-tests_jar-_-any-4615461252837718979/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T11:48:53,002 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@551592b1{HTTP/1.1, (http/1.1)}{localhost:45757} 2024-11-15T11:48:53,002 INFO [Time-limited test {}] server.Server(415): Started @248756ms 2024-11-15T11:48:53,016 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-15T11:48:53,223 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T11:48:53,225 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T11:48:53,226 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T11:48:53,226 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T11:48:53,226 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T11:48:53,227 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2eb912ab{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/faa31688-46b6-1f16-abaa-39f06c167d2c/hadoop.log.dir/,AVAILABLE} 2024-11-15T11:48:53,227 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7f424370{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T11:48:53,335 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4a6e8e46{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/faa31688-46b6-1f16-abaa-39f06c167d2c/java.io.tmpdir/jetty-localhost-40935-hadoop-hdfs-3_4_1-tests_jar-_-any-7327717540955428226/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T11:48:53,335 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1204fb24{HTTP/1.1, (http/1.1)}{localhost:40935} 2024-11-15T11:48:53,336 INFO [Time-limited test {}] server.Server(415): Started @249089ms 2024-11-15T11:48:53,337 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T11:48:53,371 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T11:48:53,374 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T11:48:53,375 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T11:48:53,375 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T11:48:53,375 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T11:48:53,376 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@13ef5561{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/faa31688-46b6-1f16-abaa-39f06c167d2c/hadoop.log.dir/,AVAILABLE} 2024-11-15T11:48:53,376 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2f61588{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T11:48:53,377 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] 
... 11 more 2024-11-15T11:48:53,377 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:53,479 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@42c5c09{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/faa31688-46b6-1f16-abaa-39f06c167d2c/java.io.tmpdir/jetty-localhost-36987-hadoop-hdfs-3_4_1-tests_jar-_-any-5158626908994940002/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T11:48:53,479 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5ce0a24{HTTP/1.1, (http/1.1)}{localhost:36987} 2024-11-15T11:48:53,479 INFO [Time-limited test {}] server.Server(415): Started @249233ms 2024-11-15T11:48:53,480 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
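
The repeated "Failed invocation" warnings above come from RecoverLeaseFSUtils calling DistributedFileSystem.isFileClosed through reflection (visible in the GeneratedMethodAccessor/Method.invoke frames) after the test's DFSClient has already been shut down; the underlying "java.io.IOException: Filesystem closed" is wrapped in an InvocationTargetException, which is why the top-level message prints as null. A minimal sketch of that reflective pattern follows — it is not the actual HBase source, and the filesystem URI and path are hypothetical:

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class IsFileClosedSketch {
      // Reflectively asks the filesystem whether a file is closed, the way a
      // lease-recovery helper might, so the caller still compiles against
      // FileSystem implementations that do not expose isFileClosed().
      static boolean isFileClosed(FileSystem fs, Path p) {
        try {
          Method m = fs.getClass().getMethod("isFileClosed", Path.class);
          return (Boolean) m.invoke(fs, p);
        } catch (NoSuchMethodException | IllegalAccessException e) {
          return false; // method not available or not accessible on this implementation
        } catch (InvocationTargetException e) {
          // The interesting error (e.g. "Filesystem closed") is the cause,
          // which is why the log shows "InvocationTargetException: null".
          System.err.println("isFileClosed failed: " + e.getCause());
          return false;
        }
      }

      public static void main(String[] args) throws Exception {
        // Hypothetical URI and path, for illustration only.
        FileSystem fs = FileSystem.get(new java.net.URI("hdfs://localhost:8020"), new Configuration());
        System.out.println(isFileClosed(fs, new Path("/tmp/example-wal")));
      }
    }
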
2024-11-15T11:48:54,086 WARN [Thread-1974 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/faa31688-46b6-1f16-abaa-39f06c167d2c/cluster_926d09c0-7fc5-146a-3776-532c837bb4dc/data/data2/current/BP-31277891-172.17.0.2-1731671332645/current, will proceed with Du for space computation calculation, 2024-11-15T11:48:54,086 WARN [Thread-1973 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/faa31688-46b6-1f16-abaa-39f06c167d2c/cluster_926d09c0-7fc5-146a-3776-532c837bb4dc/data/data1/current/BP-31277891-172.17.0.2-1731671332645/current, will proceed with Du for space computation calculation, 2024-11-15T11:48:54,105 WARN [Thread-1938 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-15T11:48:54,107 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6bb25487c2884266 with lease ID 0x50bc705f877e0675: Processing first storage report for DS-7527835b-2d12-4453-9ec2-1b1a265df208 from datanode DatanodeRegistration(127.0.0.1:43399, datanodeUuid=c43d53ca-408b-499f-9498-c8f88ac4ca2d, infoPort=38175, infoSecurePort=0, ipcPort=38779, storageInfo=lv=-57;cid=testClusterID;nsid=1499533953;c=1731671332645) 2024-11-15T11:48:54,107 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6bb25487c2884266 with lease ID 0x50bc705f877e0675: from storage DS-7527835b-2d12-4453-9ec2-1b1a265df208 node DatanodeRegistration(127.0.0.1:43399, datanodeUuid=c43d53ca-408b-499f-9498-c8f88ac4ca2d, infoPort=38175, infoSecurePort=0, ipcPort=38779, storageInfo=lv=-57;cid=testClusterID;nsid=1499533953;c=1731671332645), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T11:48:54,107 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6bb25487c2884266 with lease ID 0x50bc705f877e0675: Processing first storage report for DS-43b4cf5a-a389-4e37-85bc-3d79c94ae4ed from datanode DatanodeRegistration(127.0.0.1:43399, datanodeUuid=c43d53ca-408b-499f-9498-c8f88ac4ca2d, infoPort=38175, infoSecurePort=0, ipcPort=38779, storageInfo=lv=-57;cid=testClusterID;nsid=1499533953;c=1731671332645) 2024-11-15T11:48:54,107 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6bb25487c2884266 with lease ID 0x50bc705f877e0675: from storage DS-43b4cf5a-a389-4e37-85bc-3d79c94ae4ed node DatanodeRegistration(127.0.0.1:43399, datanodeUuid=c43d53ca-408b-499f-9498-c8f88ac4ca2d, infoPort=38175, infoSecurePort=0, ipcPort=38779, storageInfo=lv=-57;cid=testClusterID;nsid=1499533953;c=1731671332645), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T11:48:54,277 WARN [Thread-1985 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/faa31688-46b6-1f16-abaa-39f06c167d2c/cluster_926d09c0-7fc5-146a-3776-532c837bb4dc/data/data3/current/BP-31277891-172.17.0.2-1731671332645/current, will proceed with Du for space computation calculation, 2024-11-15T11:48:54,277 WARN [Thread-1986 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/faa31688-46b6-1f16-abaa-39f06c167d2c/cluster_926d09c0-7fc5-146a-3776-532c837bb4dc/data/data4/current/BP-31277891-172.17.0.2-1731671332645/current, will proceed with Du for space computation calculation, 2024-11-15T11:48:54,295 WARN [Thread-1961 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-15T11:48:54,296 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb79e9e1e4b39bc75 with lease ID 0x50bc705f877e0676: Processing first storage report for DS-ede0a6be-e5bd-4bf4-805b-b2a6bf195c14 from datanode DatanodeRegistration(127.0.0.1:35265, datanodeUuid=474fe460-4e8f-441d-8d17-66401fec762f, infoPort=45607, infoSecurePort=0, ipcPort=46589, storageInfo=lv=-57;cid=testClusterID;nsid=1499533953;c=1731671332645) 2024-11-15T11:48:54,296 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb79e9e1e4b39bc75 with lease ID 0x50bc705f877e0676: from storage DS-ede0a6be-e5bd-4bf4-805b-b2a6bf195c14 node DatanodeRegistration(127.0.0.1:35265, datanodeUuid=474fe460-4e8f-441d-8d17-66401fec762f, infoPort=45607, infoSecurePort=0, ipcPort=46589, storageInfo=lv=-57;cid=testClusterID;nsid=1499533953;c=1731671332645), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T11:48:54,296 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb79e9e1e4b39bc75 with lease ID 0x50bc705f877e0676: Processing first storage report for DS-da362157-5526-4bfd-9143-9d15e361b5fe from datanode DatanodeRegistration(127.0.0.1:35265, datanodeUuid=474fe460-4e8f-441d-8d17-66401fec762f, infoPort=45607, infoSecurePort=0, ipcPort=46589, storageInfo=lv=-57;cid=testClusterID;nsid=1499533953;c=1731671332645) 2024-11-15T11:48:54,296 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb79e9e1e4b39bc75 with lease ID 0x50bc705f877e0676: from storage DS-da362157-5526-4bfd-9143-9d15e361b5fe node DatanodeRegistration(127.0.0.1:35265, datanodeUuid=474fe460-4e8f-441d-8d17-66401fec762f, infoPort=45607, infoSecurePort=0, ipcPort=46589, storageInfo=lv=-57;cid=testClusterID;nsid=1499533953;c=1731671332645), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T11:48:54,305 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/faa31688-46b6-1f16-abaa-39f06c167d2c 2024-11-15T11:48:54,308 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/faa31688-46b6-1f16-abaa-39f06c167d2c/cluster_926d09c0-7fc5-146a-3776-532c837bb4dc/zookeeper_0, clientPort=59738, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/faa31688-46b6-1f16-abaa-39f06c167d2c/cluster_926d09c0-7fc5-146a-3776-532c837bb4dc/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/faa31688-46b6-1f16-abaa-39f06c167d2c/cluster_926d09c0-7fc5-146a-3776-532c837bb4dc/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-15T11:48:54,309 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59738 2024-11-15T11:48:54,309 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:48:54,311 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:48:54,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741825_1001 (size=7) 2024-11-15T11:48:54,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741825_1001 (size=7) 2024-11-15T11:48:54,322 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09 with version=8 2024-11-15T11:48:54,322 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/hbase-staging 2024-11-15T11:48:54,325 INFO [Time-limited test {}] client.ConnectionUtils(128): master/7adf9b3d9d04:0 server-side Connection retries=45 2024-11-15T11:48:54,325 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T11:48:54,325 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T11:48:54,325 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T11:48:54,325 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T11:48:54,325 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T11:48:54,325 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-15T11:48:54,325 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T11:48:54,326 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37705 2024-11-15T11:48:54,327 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37705 connecting to ZooKeeper ensemble=127.0.0.1:59738 2024-11-15T11:48:54,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:377050x0, quorum=127.0.0.1:59738, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null 2024-11-15T11:48:54,367 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37705-0x1013f9d30680000 connected 2024-11-15T11:48:54,377 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:54,377 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:54,438 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:48:54,440 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:48:54,442 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37705-0x1013f9d30680000, quorum=127.0.0.1:59738, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T11:48:54,442 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09, hbase.cluster.distributed=false 2024-11-15T11:48:54,444 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37705-0x1013f9d30680000, quorum=127.0.0.1:59738, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T11:48:54,445 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37705 2024-11-15T11:48:54,445 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37705 2024-11-15T11:48:54,445 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37705 2024-11-15T11:48:54,445 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37705 2024-11-15T11:48:54,446 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with 
threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37705 2024-11-15T11:48:54,464 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7adf9b3d9d04:0 server-side Connection retries=45 2024-11-15T11:48:54,464 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T11:48:54,464 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T11:48:54,464 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T11:48:54,464 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T11:48:54,464 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T11:48:54,464 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-15T11:48:54,464 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T11:48:54,465 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39085 2024-11-15T11:48:54,466 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39085 connecting to ZooKeeper ensemble=127.0.0.1:59738 2024-11-15T11:48:54,467 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:48:54,469 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:48:54,479 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:390850x0, quorum=127.0.0.1:59738, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T11:48:54,480 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:390850x0, quorum=127.0.0.1:59738, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T11:48:54,480 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39085-0x1013f9d30680001 connected 2024-11-15T11:48:54,480 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-15T11:48:54,480 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-15T11:48:54,481 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39085-0x1013f9d30680001, quorum=127.0.0.1:59738, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-15T11:48:54,482 DEBUG [Time-limited test {}] 
zookeeper.ZKUtil(113): regionserver:39085-0x1013f9d30680001, quorum=127.0.0.1:59738, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T11:48:54,488 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39085 2024-11-15T11:48:54,488 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39085 2024-11-15T11:48:54,489 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39085 2024-11-15T11:48:54,489 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39085 2024-11-15T11:48:54,490 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39085 2024-11-15T11:48:54,501 DEBUG [M:0;7adf9b3d9d04:37705 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7adf9b3d9d04:37705 2024-11-15T11:48:54,501 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/7adf9b3d9d04,37705,1731671334324 2024-11-15T11:48:54,512 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37705-0x1013f9d30680000, quorum=127.0.0.1:59738, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T11:48:54,512 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1013f9d30680001, quorum=127.0.0.1:59738, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T11:48:54,513 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37705-0x1013f9d30680000, quorum=127.0.0.1:59738, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/7adf9b3d9d04,37705,1731671334324 2024-11-15T11:48:54,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1013f9d30680001, quorum=127.0.0.1:59738, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-15T11:48:54,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37705-0x1013f9d30680000, quorum=127.0.0.1:59738, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:48:54,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1013f9d30680001, quorum=127.0.0.1:59738, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:48:54,521 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37705-0x1013f9d30680000, quorum=127.0.0.1:59738, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-15T11:48:54,522 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7adf9b3d9d04,37705,1731671334324 from backup master directory 2024-11-15T11:48:54,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37705-0x1013f9d30680000, quorum=127.0.0.1:59738, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, 
path=/hbase/backup-masters/7adf9b3d9d04,37705,1731671334324 2024-11-15T11:48:54,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1013f9d30680001, quorum=127.0.0.1:59738, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T11:48:54,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37705-0x1013f9d30680000, quorum=127.0.0.1:59738, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T11:48:54,529 WARN [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-15T11:48:54,529 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7adf9b3d9d04,37705,1731671334324 2024-11-15T11:48:54,534 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/hbase.id] with ID: ce244ac2-917a-41eb-9d79-87b8d0e87811 2024-11-15T11:48:54,534 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/.tmp/hbase.id 2024-11-15T11:48:54,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741826_1002 (size=42) 2024-11-15T11:48:54,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741826_1002 (size=42) 2024-11-15T11:48:54,543 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/.tmp/hbase.id]:[hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/hbase.id] 2024-11-15T11:48:54,556 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:48:54,556 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-15T11:48:54,558 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
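
The FSUtils entries above (create cluster ID file, write it to a temporary location under .tmp, then move it to its target location) follow the usual write-then-rename idiom for publishing a small file on HDFS so readers never observe a partially written hbase.id. A rough sketch of that idiom with the Hadoop FileSystem API is below; the paths and the generated ID are made up for illustration:

    import java.nio.charset.StandardCharsets;
    import java.util.UUID;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class ClusterIdFileSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        // Hypothetical locations; the test log uses <rootdir>/.tmp/hbase.id and <rootdir>/hbase.id.
        Path tmp = new Path("/hbase-rootdir/.tmp/hbase.id");
        Path dst = new Path("/hbase-rootdir/hbase.id");
        String clusterId = UUID.randomUUID().toString();

        // Write the ID to a temporary file first...
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write(clusterId.getBytes(StandardCharsets.UTF_8));
        }
        // ...then rename it to its final name in one step.
        if (!fs.rename(tmp, dst)) {
          throw new java.io.IOException("rename " + tmp + " -> " + dst + " failed");
        }
      }
    }
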
2024-11-15T11:48:54,568 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37705-0x1013f9d30680000, quorum=127.0.0.1:59738, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:48:54,568 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1013f9d30680001, quorum=127.0.0.1:59738, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:48:54,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741827_1003 (size=196) 2024-11-15T11:48:54,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741827_1003 (size=196) 2024-11-15T11:48:54,575 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T11:48:54,576 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-15T11:48:54,577 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T11:48:54,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741828_1004 (size=1189) 2024-11-15T11:48:54,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741828_1004 (size=1189) 2024-11-15T11:48:54,586 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/MasterData/data/master/store 2024-11-15T11:48:54,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741829_1005 (size=34) 2024-11-15T11:48:54,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741829_1005 (size=34) 2024-11-15T11:48:54,593 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:48:54,593 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T11:48:54,593 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:48:54,593 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:48:54,593 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T11:48:54,593 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:48:54,593 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
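
The master:store descriptor printed in the preceding entries (an in-memory 'info' family with ROW_INDEX_V1 encoding, ROWCOL bloom filter, 3 versions and an 8 KB block size, plus single-version 'proc', 'rs' and 'state' families with 64 KB blocks) can be approximated with the public descriptor builders. The sketch below is an illustrative reconstruction from the logged attributes only, not the code MasterRegion actually runs, and it omits the table-level metadata:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class MasterStoreDescriptorSketch {
      // Helper for the three families that share the same settings in the log.
      static ColumnFamilyDescriptor simpleFamily(String name) {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .setBlocksize(64 * 1024)
            .build();
      }

      public static void main(String[] args) {
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .build();

        TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(info)
            .setColumnFamily(simpleFamily("proc"))
            .setColumnFamily(simpleFamily("rs"))
            .setColumnFamily(simpleFamily("state"))
            .build();

        System.out.println(td);
      }
    }
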
2024-11-15T11:48:54,593 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731671334593Disabling compacts and flushes for region at 1731671334593Disabling writes for close at 1731671334593Writing region close event to WAL at 1731671334593Closed at 1731671334593 2024-11-15T11:48:54,594 WARN [master/7adf9b3d9d04:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/MasterData/data/master/store/.initializing 2024-11-15T11:48:54,594 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/MasterData/WALs/7adf9b3d9d04,37705,1731671334324 2024-11-15T11:48:54,597 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7adf9b3d9d04%2C37705%2C1731671334324, suffix=, logDir=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/MasterData/WALs/7adf9b3d9d04,37705,1731671334324, archiveDir=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/MasterData/oldWALs, maxLogs=10 2024-11-15T11:48:54,597 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7adf9b3d9d04%2C37705%2C1731671334324.1731671334597 2024-11-15T11:48:54,606 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/MasterData/WALs/7adf9b3d9d04,37705,1731671334324/7adf9b3d9d04%2C37705%2C1731671334324.1731671334597 2024-11-15T11:48:54,611 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45607:45607),(127.0.0.1/127.0.0.1:38175:38175)] 2024-11-15T11:48:54,620 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-15T11:48:54,620 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:48:54,620 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:48:54,620 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:48:54,623 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:48:54,624 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-15T11:48:54,624 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:48:54,624 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:48:54,625 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:48:54,626 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-15T11:48:54,626 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:48:54,626 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:48:54,626 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:48:54,627 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-15T11:48:54,628 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:48:54,628 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:48:54,628 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:48:54,630 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-15T11:48:54,630 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:48:54,630 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:48:54,631 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:48:54,631 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:48:54,632 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:48:54,633 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:48:54,633 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:48:54,633 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-15T11:48:54,634 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:48:54,636 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T11:48:54,637 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=780626, jitterRate=-0.007383301854133606}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-15T11:48:54,637 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731671334621Initializing all the Stores at 1731671334621Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731671334621Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731671334622 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731671334622Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731671334622Cleaning up temporary data from old regions at 1731671334633 (+11 ms)Region opened successfully at 1731671334637 (+4 ms) 2024-11-15T11:48:54,638 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-15T11:48:54,641 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@640c0c36, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7adf9b3d9d04/172.17.0.2:0 2024-11-15T11:48:54,642 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-15T11:48:54,642 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-15T11:48:54,642 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-15T11:48:54,642 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-15T11:48:54,643 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-15T11:48:54,643 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-15T11:48:54,643 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-15T11:48:54,645 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-15T11:48:54,646 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37705-0x1013f9d30680000, quorum=127.0.0.1:59738, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-15T11:48:54,660 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-15T11:48:54,660 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-15T11:48:54,661 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37705-0x1013f9d30680000, quorum=127.0.0.1:59738, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-15T11:48:54,671 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-15T11:48:54,671 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-15T11:48:54,672 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37705-0x1013f9d30680000, quorum=127.0.0.1:59738, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-15T11:48:54,679 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-15T11:48:54,680 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37705-0x1013f9d30680000, quorum=127.0.0.1:59738, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-15T11:48:54,687 DEBUG 
[master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-15T11:48:54,690 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37705-0x1013f9d30680000, quorum=127.0.0.1:59738, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-15T11:48:54,696 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-15T11:48:54,704 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1013f9d30680001, quorum=127.0.0.1:59738, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T11:48:54,704 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37705-0x1013f9d30680000, quorum=127.0.0.1:59738, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T11:48:54,704 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1013f9d30680001, quorum=127.0.0.1:59738, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:48:54,704 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37705-0x1013f9d30680000, quorum=127.0.0.1:59738, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:48:54,705 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=7adf9b3d9d04,37705,1731671334324, sessionid=0x1013f9d30680000, setting cluster-up flag (Was=false) 2024-11-15T11:48:54,721 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37705-0x1013f9d30680000, quorum=127.0.0.1:59738, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:48:54,721 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1013f9d30680001, quorum=127.0.0.1:59738, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:48:54,746 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-15T11:48:54,747 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7adf9b3d9d04,37705,1731671334324 2024-11-15T11:48:54,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1013f9d30680001, quorum=127.0.0.1:59738, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:48:54,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37705-0x1013f9d30680000, quorum=127.0.0.1:59738, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:48:54,787 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-15T11:48:54,789 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7adf9b3d9d04,37705,1731671334324 2024-11-15T11:48:54,790 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-15T11:48:54,791 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-15T11:48:54,792 INFO [RS:0;7adf9b3d9d04:39085 {}] regionserver.HRegionServer(746): ClusterId : ce244ac2-917a-41eb-9d79-87b8d0e87811 2024-11-15T11:48:54,792 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-15T11:48:54,792 DEBUG [RS:0;7adf9b3d9d04:39085 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-15T11:48:54,792 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-15T11:48:54,792 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7adf9b3d9d04,37705,1731671334324 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-15T11:48:54,802 DEBUG [RS:0;7adf9b3d9d04:39085 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-15T11:48:54,802 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7adf9b3d9d04:0, corePoolSize=5, maxPoolSize=5 2024-11-15T11:48:54,802 DEBUG [RS:0;7adf9b3d9d04:39085 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-15T11:48:54,802 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7adf9b3d9d04:0, corePoolSize=5, maxPoolSize=5 2024-11-15T11:48:54,802 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7adf9b3d9d04:0, corePoolSize=5, maxPoolSize=5 2024-11-15T11:48:54,802 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7adf9b3d9d04:0, corePoolSize=5, maxPoolSize=5 2024-11-15T11:48:54,802 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7adf9b3d9d04:0, corePoolSize=10, maxPoolSize=10 2024-11-15T11:48:54,802 DEBUG 
[master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:48:54,803 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/7adf9b3d9d04:0, corePoolSize=2, maxPoolSize=2 2024-11-15T11:48:54,803 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:48:54,803 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731671364803 2024-11-15T11:48:54,803 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-15T11:48:54,804 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-15T11:48:54,804 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-15T11:48:54,804 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-15T11:48:54,804 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-15T11:48:54,804 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-15T11:48:54,804 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-15T11:48:54,804 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-15T11:48:54,804 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-15T11:48:54,804 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-15T11:48:54,805 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T11:48:54,805 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-15T11:48:54,805 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-15T11:48:54,805 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-15T11:48:54,805 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7adf9b3d9d04:0:becomeActiveMaster-HFileCleaner.large.0-1731671334805,5,FailOnTimeoutGroup] 2024-11-15T11:48:54,805 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/7adf9b3d9d04:0:becomeActiveMaster-HFileCleaner.small.0-1731671334805,5,FailOnTimeoutGroup] 2024-11-15T11:48:54,805 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-15T11:48:54,805 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-15T11:48:54,805 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-15T11:48:54,805 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-15T11:48:54,806 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:48:54,806 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-15T11:48:54,813 DEBUG [RS:0;7adf9b3d9d04:39085 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-15T11:48:54,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741831_1007 (size=1321) 2024-11-15T11:48:54,813 DEBUG [RS:0;7adf9b3d9d04:39085 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63b20fcc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7adf9b3d9d04/172.17.0.2:0 2024-11-15T11:48:54,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741831_1007 (size=1321) 2024-11-15T11:48:54,826 DEBUG [RS:0;7adf9b3d9d04:39085 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;7adf9b3d9d04:39085 2024-11-15T11:48:54,827 INFO [RS:0;7adf9b3d9d04:39085 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-15T11:48:54,827 INFO [RS:0;7adf9b3d9d04:39085 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-15T11:48:54,827 DEBUG [RS:0;7adf9b3d9d04:39085 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-15T11:48:54,827 INFO [RS:0;7adf9b3d9d04:39085 {}] regionserver.HRegionServer(2659): reportForDuty to master=7adf9b3d9d04,37705,1731671334324 with port=39085, startcode=1731671334464 2024-11-15T11:48:54,828 DEBUG [RS:0;7adf9b3d9d04:39085 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-15T11:48:54,829 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56371, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-15T11:48:54,830 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37705 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 7adf9b3d9d04,39085,1731671334464 2024-11-15T11:48:54,830 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37705 {}] master.ServerManager(517): Registering regionserver=7adf9b3d9d04,39085,1731671334464 2024-11-15T11:48:54,831 DEBUG [RS:0;7adf9b3d9d04:39085 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09 2024-11-15T11:48:54,832 DEBUG [RS:0;7adf9b3d9d04:39085 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42507 2024-11-15T11:48:54,832 DEBUG [RS:0;7adf9b3d9d04:39085 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-15T11:48:54,843 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37705-0x1013f9d30680000, quorum=127.0.0.1:59738, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T11:48:54,843 DEBUG [RS:0;7adf9b3d9d04:39085 {}] zookeeper.ZKUtil(111): regionserver:39085-0x1013f9d30680001, quorum=127.0.0.1:59738, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7adf9b3d9d04,39085,1731671334464 2024-11-15T11:48:54,843 WARN [RS:0;7adf9b3d9d04:39085 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-15T11:48:54,843 INFO [RS:0;7adf9b3d9d04:39085 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T11:48:54,844 DEBUG [RS:0;7adf9b3d9d04:39085 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/WALs/7adf9b3d9d04,39085,1731671334464 2024-11-15T11:48:54,844 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7adf9b3d9d04,39085,1731671334464] 2024-11-15T11:48:54,847 INFO [RS:0;7adf9b3d9d04:39085 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-15T11:48:54,849 INFO [RS:0;7adf9b3d9d04:39085 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-15T11:48:54,849 INFO [RS:0;7adf9b3d9d04:39085 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-15T11:48:54,849 INFO [RS:0;7adf9b3d9d04:39085 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-15T11:48:54,850 INFO [RS:0;7adf9b3d9d04:39085 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-15T11:48:54,851 INFO [RS:0;7adf9b3d9d04:39085 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-15T11:48:54,851 INFO [RS:0;7adf9b3d9d04:39085 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-15T11:48:54,851 DEBUG [RS:0;7adf9b3d9d04:39085 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:48:54,851 DEBUG [RS:0;7adf9b3d9d04:39085 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:48:54,851 DEBUG [RS:0;7adf9b3d9d04:39085 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:48:54,851 DEBUG [RS:0;7adf9b3d9d04:39085 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:48:54,851 DEBUG [RS:0;7adf9b3d9d04:39085 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:48:54,851 DEBUG [RS:0;7adf9b3d9d04:39085 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7adf9b3d9d04:0, corePoolSize=2, maxPoolSize=2 2024-11-15T11:48:54,851 DEBUG [RS:0;7adf9b3d9d04:39085 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:48:54,851 DEBUG [RS:0;7adf9b3d9d04:39085 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:48:54,851 DEBUG [RS:0;7adf9b3d9d04:39085 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:48:54,851 DEBUG [RS:0;7adf9b3d9d04:39085 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:48:54,851 DEBUG [RS:0;7adf9b3d9d04:39085 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:48:54,851 DEBUG [RS:0;7adf9b3d9d04:39085 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:48:54,851 DEBUG [RS:0;7adf9b3d9d04:39085 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7adf9b3d9d04:0, corePoolSize=3, maxPoolSize=3 2024-11-15T11:48:54,852 DEBUG [RS:0;7adf9b3d9d04:39085 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7adf9b3d9d04:0, corePoolSize=3, maxPoolSize=3 2024-11-15T11:48:54,852 INFO [RS:0;7adf9b3d9d04:39085 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-15T11:48:54,852 INFO [RS:0;7adf9b3d9d04:39085 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T11:48:54,852 INFO [RS:0;7adf9b3d9d04:39085 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T11:48:54,852 INFO [RS:0;7adf9b3d9d04:39085 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-15T11:48:54,852 INFO [RS:0;7adf9b3d9d04:39085 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-15T11:48:54,852 INFO [RS:0;7adf9b3d9d04:39085 {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,39085,1731671334464-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T11:48:54,867 INFO [RS:0;7adf9b3d9d04:39085 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-15T11:48:54,867 INFO [RS:0;7adf9b3d9d04:39085 {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,39085,1731671334464-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T11:48:54,867 INFO [RS:0;7adf9b3d9d04:39085 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T11:48:54,867 INFO [RS:0;7adf9b3d9d04:39085 {}] regionserver.Replication(171): 7adf9b3d9d04,39085,1731671334464 started 2024-11-15T11:48:54,880 INFO [RS:0;7adf9b3d9d04:39085 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T11:48:54,880 INFO [RS:0;7adf9b3d9d04:39085 {}] regionserver.HRegionServer(1482): Serving as 7adf9b3d9d04,39085,1731671334464, RpcServer on 7adf9b3d9d04/172.17.0.2:39085, sessionid=0x1013f9d30680001 2024-11-15T11:48:54,881 DEBUG [RS:0;7adf9b3d9d04:39085 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-15T11:48:54,881 DEBUG [RS:0;7adf9b3d9d04:39085 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7adf9b3d9d04,39085,1731671334464 2024-11-15T11:48:54,881 DEBUG [RS:0;7adf9b3d9d04:39085 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7adf9b3d9d04,39085,1731671334464' 2024-11-15T11:48:54,881 DEBUG [RS:0;7adf9b3d9d04:39085 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-15T11:48:54,881 DEBUG [RS:0;7adf9b3d9d04:39085 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-15T11:48:54,881 DEBUG [RS:0;7adf9b3d9d04:39085 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-15T11:48:54,881 DEBUG [RS:0;7adf9b3d9d04:39085 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-15T11:48:54,881 DEBUG [RS:0;7adf9b3d9d04:39085 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7adf9b3d9d04,39085,1731671334464 2024-11-15T11:48:54,882 DEBUG [RS:0;7adf9b3d9d04:39085 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7adf9b3d9d04,39085,1731671334464' 2024-11-15T11:48:54,882 DEBUG [RS:0;7adf9b3d9d04:39085 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-15T11:48:54,882 DEBUG 
[RS:0;7adf9b3d9d04:39085 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-15T11:48:54,882 DEBUG [RS:0;7adf9b3d9d04:39085 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-15T11:48:54,882 INFO [RS:0;7adf9b3d9d04:39085 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-15T11:48:54,882 INFO [RS:0;7adf9b3d9d04:39085 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-15T11:48:54,985 INFO [RS:0;7adf9b3d9d04:39085 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7adf9b3d9d04%2C39085%2C1731671334464, suffix=, logDir=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/WALs/7adf9b3d9d04,39085,1731671334464, archiveDir=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/oldWALs, maxLogs=32 2024-11-15T11:48:54,986 INFO [RS:0;7adf9b3d9d04:39085 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7adf9b3d9d04%2C39085%2C1731671334464.1731671334986 2024-11-15T11:48:54,996 INFO [RS:0;7adf9b3d9d04:39085 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/WALs/7adf9b3d9d04,39085,1731671334464/7adf9b3d9d04%2C39085%2C1731671334464.1731671334986 2024-11-15T11:48:54,997 DEBUG [RS:0;7adf9b3d9d04:39085 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45607:45607),(127.0.0.1/127.0.0.1:38175:38175)] 2024-11-15T11:48:55,214 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-15T11:48:55,214 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, 
regionDir=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09 2024-11-15T11:48:55,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741833_1009 (size=32) 2024-11-15T11:48:55,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741833_1009 (size=32) 2024-11-15T11:48:55,220 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:48:55,222 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T11:48:55,223 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T11:48:55,223 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:48:55,223 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:48:55,224 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T11:48:55,225 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T11:48:55,225 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:48:55,225 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:48:55,225 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T11:48:55,227 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T11:48:55,227 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:48:55,227 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:48:55,227 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T11:48:55,229 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T11:48:55,229 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:48:55,229 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:48:55,229 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T11:48:55,230 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits 
file(s) under hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/hbase/meta/1588230740 2024-11-15T11:48:55,230 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/hbase/meta/1588230740 2024-11-15T11:48:55,232 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T11:48:55,232 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T11:48:55,232 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-15T11:48:55,234 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T11:48:55,236 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T11:48:55,236 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=800176, jitterRate=0.017477422952651978}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T11:48:55,237 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731671335220Initializing all the Stores at 1731671335221 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731671335221Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731671335221Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731671335221Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731671335221Cleaning up temporary data from old regions at 1731671335232 (+11 ms)Region opened successfully at 1731671335237 (+5 ms) 2024-11-15T11:48:55,237 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T11:48:55,237 INFO [PEWorker-1 {}] regionserver.HRegion(1755): 
Closing region hbase:meta,,1.1588230740 2024-11-15T11:48:55,237 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T11:48:55,237 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T11:48:55,237 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T11:48:55,238 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T11:48:55,238 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731671335237Disabling compacts and flushes for region at 1731671335237Disabling writes for close at 1731671335237Writing region close event to WAL at 1731671335237Closed at 1731671335237 2024-11-15T11:48:55,239 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T11:48:55,239 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-15T11:48:55,239 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-15T11:48:55,240 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T11:48:55,241 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-15T11:48:55,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:55,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T11:48:55,391 DEBUG [7adf9b3d9d04:37705 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-15T11:48:55,392 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7adf9b3d9d04,39085,1731671334464 2024-11-15T11:48:55,393 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7adf9b3d9d04,39085,1731671334464, state=OPENING 2024-11-15T11:48:55,458 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-15T11:48:55,496 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1013f9d30680001, quorum=127.0.0.1:59738, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:48:55,496 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37705-0x1013f9d30680000, quorum=127.0.0.1:59738, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:48:55,497 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T11:48:55,497 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=7adf9b3d9d04,39085,1731671334464}] 2024-11-15T11:48:55,497 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T11:48:55,498 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T11:48:55,652 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-15T11:48:55,654 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58855, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-15T11:48:55,658 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-15T11:48:55,658 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T11:48:55,660 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7adf9b3d9d04%2C39085%2C1731671334464.meta, suffix=.meta, logDir=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/WALs/7adf9b3d9d04,39085,1731671334464, archiveDir=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/oldWALs, maxLogs=32 2024-11-15T11:48:55,660 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 7adf9b3d9d04%2C39085%2C1731671334464.meta.1731671335660.meta 2024-11-15T11:48:55,668 INFO 
[RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/WALs/7adf9b3d9d04,39085,1731671334464/7adf9b3d9d04%2C39085%2C1731671334464.meta.1731671335660.meta 2024-11-15T11:48:55,669 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38175:38175),(127.0.0.1/127.0.0.1:45607:45607)] 2024-11-15T11:48:55,682 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-15T11:48:55,682 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-15T11:48:55,682 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-15T11:48:55,683 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-15T11:48:55,683 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-15T11:48:55,683 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:48:55,683 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-15T11:48:55,683 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-15T11:48:55,684 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T11:48:55,685 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T11:48:55,685 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:48:55,685 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:48:55,685 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T11:48:55,686 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T11:48:55,686 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:48:55,687 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:48:55,687 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T11:48:55,687 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T11:48:55,687 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:48:55,688 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:48:55,688 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T11:48:55,688 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T11:48:55,689 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:48:55,689 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:48:55,689 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T11:48:55,690 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/hbase/meta/1588230740 2024-11-15T11:48:55,691 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/hbase/meta/1588230740 2024-11-15T11:48:55,692 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T11:48:55,692 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T11:48:55,692 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-15T11:48:55,693 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T11:48:55,694 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=788973, jitterRate=0.0032318681478500366}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T11:48:55,694 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-15T11:48:55,695 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731671335683Writing region info on filesystem at 1731671335683Initializing all the Stores at 1731671335684 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731671335684Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731671335684Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731671335684Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731671335684Cleaning up temporary data from old regions at 1731671335692 (+8 ms)Running coprocessor post-open hooks at 1731671335694 (+2 ms)Region opened successfully at 1731671335695 (+1 ms) 2024-11-15T11:48:55,696 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731671335651 2024-11-15T11:48:55,698 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-15T11:48:55,698 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-15T11:48:55,699 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=7adf9b3d9d04,39085,1731671334464 2024-11-15T11:48:55,700 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7adf9b3d9d04,39085,1731671334464, state=OPEN 2024-11-15T11:48:55,733 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37705-0x1013f9d30680000, quorum=127.0.0.1:59738, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T11:48:55,733 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1013f9d30680001, quorum=127.0.0.1:59738, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T11:48:55,733 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=7adf9b3d9d04,39085,1731671334464 2024-11-15T11:48:55,733 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T11:48:55,733 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T11:48:55,738 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-15T11:48:55,738 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=7adf9b3d9d04,39085,1731671334464 in 236 msec 2024-11-15T11:48:55,741 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-15T11:48:55,741 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 499 msec 2024-11-15T11:48:55,742 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T11:48:55,742 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-15T11:48:55,743 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:48:55,743 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7adf9b3d9d04,39085,1731671334464, seqNum=-1] 2024-11-15T11:48:55,744 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:48:55,745 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46693, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T11:48:55,751 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 959 msec 2024-11-15T11:48:55,752 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731671335751, completionTime=-1 2024-11-15T11:48:55,752 INFO 
[master/7adf9b3d9d04:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-15T11:48:55,752 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-15T11:48:55,753 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-15T11:48:55,754 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731671395754 2024-11-15T11:48:55,754 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731671455754 2024-11-15T11:48:55,754 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-15T11:48:55,754 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,37705,1731671334324-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T11:48:55,754 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,37705,1731671334324-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T11:48:55,754 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,37705,1731671334324-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T11:48:55,754 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7adf9b3d9d04:37705, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T11:48:55,754 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-15T11:48:55,754 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-15T11:48:55,756 DEBUG [master/7adf9b3d9d04:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-15T11:48:55,758 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.229sec 2024-11-15T11:48:55,758 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-15T11:48:55,758 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-15T11:48:55,758 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-15T11:48:55,758 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
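The block of ChoreService lines above shows the master scheduling its periodic maintenance tasks (balancer, catalog janitor, hbck check, and so on) at fixed periods. The sketch below is only an analogy in plain java.util.concurrent with invented task bodies; it is not the ChoreService/ScheduledChore API itself, just the same fixed-rate scheduling idea using two of the periods from the log:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    // Analogy only: fixed-rate background tasks, similar in spirit to the master's chores.
    public class ChoreAnalogy {
        public static void main(String[] args) {
            ScheduledExecutorService pool = Executors.newSingleThreadScheduledExecutor();
            // BalancerChore runs every 300000 ms in the log above.
            pool.scheduleAtFixedRate(() -> System.out.println("balance regions"),
                    300_000, 300_000, TimeUnit.MILLISECONDS);
            // HbckChore runs every 3600000 ms.
            pool.scheduleAtFixedRate(() -> System.out.println("consistency check"),
                    3_600_000, 3_600_000, TimeUnit.MILLISECONDS);
        }
    }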
2024-11-15T11:48:55,758 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-15T11:48:55,758 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,37705,1731671334324-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T11:48:55,758 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,37705,1731671334324-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-15T11:48:55,761 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-15T11:48:55,761 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-15T11:48:55,761 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,37705,1731671334324-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T11:48:55,792 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27df3092, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:48:55,792 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 7adf9b3d9d04,37705,-1 for getting cluster id 2024-11-15T11:48:55,793 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T11:48:55,794 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ce244ac2-917a-41eb-9d79-87b8d0e87811' 2024-11-15T11:48:55,794 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T11:48:55,794 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ce244ac2-917a-41eb-9d79-87b8d0e87811" 2024-11-15T11:48:55,794 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@533a12fb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:48:55,795 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [7adf9b3d9d04,37705,-1] 2024-11-15T11:48:55,795 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T11:48:55,795 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:48:55,796 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39562, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T11:48:55,797 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63515f8a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:48:55,797 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:48:55,798 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7adf9b3d9d04,39085,1731671334464, seqNum=-1] 2024-11-15T11:48:55,798 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:48:55,800 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37474, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T11:48:55,801 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=7adf9b3d9d04,37705,1731671334324 2024-11-15T11:48:55,801 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:48:55,804 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-15T11:48:55,804 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-15T11:48:55,805 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 7adf9b3d9d04,37705,1731671334324 2024-11-15T11:48:55,805 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@7230ab94 2024-11-15T11:48:55,805 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-15T11:48:55,807 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39568, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-15T11:48:55,807 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37705 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-15T11:48:55,807 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37705 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
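The two TableDescriptorChecker warnings above fire because the test passes a deliberately tiny MAX_FILESIZE (786432 bytes) and MEMSTORE_FLUSHSIZE (8192 bytes) so that splits and flushes happen quickly during the run. A minimal sketch of a descriptor carrying those values, using the standard TableDescriptorBuilder client API; only the two sizes and the table/family names come from the log, the surrounding test code is assumed:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    // Sketch: a descriptor with the small sizes that trigger the sanity-check warnings.
    public class SmallSizesDescriptor {
        static TableDescriptor build() {
            return TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
                .setMaxFileSize(786432L)        // triggers the MAX_FILESIZE warning
                .setMemStoreFlushSize(8192L)    // triggers the MEMSTORE_FLUSHSIZE warning
                .build();
        }
    }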
2024-11-15T11:48:55,807 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37705 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T11:48:55,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37705 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-15T11:48:55,810 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-15T11:48:55,810 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:48:55,810 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37705 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-15T11:48:55,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37705 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-15T11:48:55,812 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-15T11:48:55,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741835_1011 (size=381) 2024-11-15T11:48:55,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741835_1011 (size=381) 2024-11-15T11:48:55,822 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => eed6ab3436672cf1ecb2e52ea172410b, NAME => 'TestLogRolling-testLogRolling,,1731671335807.eed6ab3436672cf1ecb2e52ea172410b.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09 2024-11-15T11:48:55,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741836_1012 (size=64) 2024-11-15T11:48:55,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741836_1012 (size=64) 2024-11-15T11:48:55,828 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated 
TestLogRolling-testLogRolling,,1731671335807.eed6ab3436672cf1ecb2e52ea172410b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:48:55,828 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing eed6ab3436672cf1ecb2e52ea172410b, disabling compactions & flushes 2024-11-15T11:48:55,828 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731671335807.eed6ab3436672cf1ecb2e52ea172410b. 2024-11-15T11:48:55,828 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731671335807.eed6ab3436672cf1ecb2e52ea172410b. 2024-11-15T11:48:55,828 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731671335807.eed6ab3436672cf1ecb2e52ea172410b. after waiting 0 ms 2024-11-15T11:48:55,829 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731671335807.eed6ab3436672cf1ecb2e52ea172410b. 2024-11-15T11:48:55,829 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731671335807.eed6ab3436672cf1ecb2e52ea172410b. 2024-11-15T11:48:55,829 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for eed6ab3436672cf1ecb2e52ea172410b: Waiting for close lock at 1731671335828Disabling compacts and flushes for region at 1731671335828Disabling writes for close at 1731671335828Writing region close event to WAL at 1731671335829 (+1 ms)Closed at 1731671335829 2024-11-15T11:48:55,830 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-15T11:48:55,830 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1731671335807.eed6ab3436672cf1ecb2e52ea172410b.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731671335830"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731671335830"}]},"ts":"1731671335830"} 2024-11-15T11:48:55,833 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
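The CreateTableProcedure steps above (PRE_OPERATION, WRITE_FS_LAYOUT, ADD_TO_META) all run on the master; from the client's side the whole flow is a single createTable call against the Admin API that blocks until the procedure completes. A minimal, hedged sketch, assuming a configuration pointing at this mini cluster and reusing the descriptor sketched earlier:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Sketch: the client-side call that surfaces as CreateTableProcedure (pid=4) in this log.
    public class CreateTableSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Returns once the master reports the procedure finished.
                admin.createTable(SmallSizesDescriptor.build());
            }
        }
    }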
2024-11-15T11:48:55,834 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-15T11:48:55,834 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731671335834"}]},"ts":"1731671335834"} 2024-11-15T11:48:55,837 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-15T11:48:55,837 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=eed6ab3436672cf1ecb2e52ea172410b, ASSIGN}] 2024-11-15T11:48:55,838 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=eed6ab3436672cf1ecb2e52ea172410b, ASSIGN 2024-11-15T11:48:55,839 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=eed6ab3436672cf1ecb2e52ea172410b, ASSIGN; state=OFFLINE, location=7adf9b3d9d04,39085,1731671334464; forceNewPlan=false, retain=false 2024-11-15T11:48:55,990 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=eed6ab3436672cf1ecb2e52ea172410b, regionState=OPENING, regionLocation=7adf9b3d9d04,39085,1731671334464 2024-11-15T11:48:55,992 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=eed6ab3436672cf1ecb2e52ea172410b, ASSIGN because future has completed 2024-11-15T11:48:55,993 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure eed6ab3436672cf1ecb2e52ea172410b, server=7adf9b3d9d04,39085,1731671334464}] 2024-11-15T11:48:56,150 INFO [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731671335807.eed6ab3436672cf1ecb2e52ea172410b. 
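Once the ASSIGN subprocedure runs, RegionStateStore writes the region's state (OPENING, then OPEN) and its location into hbase:meta; that row is exactly what clients read back when they locate the region. A minimal sketch of that lookup with the standard RegionLocator API; the connection setup is assumed and the printed server name is only what this particular log would show:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    // Sketch: reading back the region location that the assignment just published to hbase:meta.
    public class LocateRegionSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 RegionLocator locator = conn.getRegionLocator(
                     TableName.valueOf("TestLogRolling-testLogRolling"))) {
                // Empty row key = the table's first (and here only) region; reload=true bypasses the cache.
                HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes(""), true);
                System.out.println(loc.getServerName()); // e.g. 7adf9b3d9d04,39085,1731671334464
            }
        }
    }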
2024-11-15T11:48:56,150 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => eed6ab3436672cf1ecb2e52ea172410b, NAME => 'TestLogRolling-testLogRolling,,1731671335807.eed6ab3436672cf1ecb2e52ea172410b.', STARTKEY => '', ENDKEY => ''} 2024-11-15T11:48:56,150 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling eed6ab3436672cf1ecb2e52ea172410b 2024-11-15T11:48:56,150 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731671335807.eed6ab3436672cf1ecb2e52ea172410b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:48:56,150 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for eed6ab3436672cf1ecb2e52ea172410b 2024-11-15T11:48:56,150 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for eed6ab3436672cf1ecb2e52ea172410b 2024-11-15T11:48:56,151 INFO [StoreOpener-eed6ab3436672cf1ecb2e52ea172410b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region eed6ab3436672cf1ecb2e52ea172410b 2024-11-15T11:48:56,153 INFO [StoreOpener-eed6ab3436672cf1ecb2e52ea172410b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region eed6ab3436672cf1ecb2e52ea172410b columnFamilyName info 2024-11-15T11:48:56,153 DEBUG [StoreOpener-eed6ab3436672cf1ecb2e52ea172410b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:48:56,153 INFO [StoreOpener-eed6ab3436672cf1ecb2e52ea172410b-1 {}] regionserver.HStore(327): Store=eed6ab3436672cf1ecb2e52ea172410b/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:48:56,153 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for eed6ab3436672cf1ecb2e52ea172410b 2024-11-15T11:48:56,154 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b 2024-11-15T11:48:56,155 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b 2024-11-15T11:48:56,155 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for eed6ab3436672cf1ecb2e52ea172410b 2024-11-15T11:48:56,155 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for eed6ab3436672cf1ecb2e52ea172410b 2024-11-15T11:48:56,157 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for eed6ab3436672cf1ecb2e52ea172410b 2024-11-15T11:48:56,159 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T11:48:56,159 INFO [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened eed6ab3436672cf1ecb2e52ea172410b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=729605, jitterRate=-0.07226009666919708}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T11:48:56,160 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for eed6ab3436672cf1ecb2e52ea172410b 2024-11-15T11:48:56,160 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for eed6ab3436672cf1ecb2e52ea172410b: Running coprocessor pre-open hook at 1731671336150Writing region info on filesystem at 1731671336150Initializing all the Stores at 1731671336151 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731671336151Cleaning up temporary data from old regions at 1731671336155 (+4 ms)Running coprocessor post-open hooks at 1731671336160 (+5 ms)Region opened successfully at 1731671336160 2024-11-15T11:48:56,161 INFO [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731671335807.eed6ab3436672cf1ecb2e52ea172410b., pid=6, masterSystemTime=1731671336146 2024-11-15T11:48:56,164 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731671335807.eed6ab3436672cf1ecb2e52ea172410b. 
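After the table-creation procedures finish below, the Close-WAL-Writer-0 thread repeatedly logs "Failed invocation" warnings from RecoverLeaseFSUtils: it is recovering the lease on old WAL files and polling isFileClosed, but the DFSClient behind that path has already been shut down ("Filesystem closed"), so each probe throws and is retried roughly once per second. A simplified sketch of that recover-then-poll pattern against the HDFS client API; the WAL path is illustrative, and the real utility adds preemptive timeouts and backoff:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    // Sketch of the lease-recovery polling loop behind the Close-WAL-Writer warnings below.
    public class LeaseRecoverySketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            Path wal = new Path("/wals/example.wal"); // illustrative path, not one from this log
            DistributedFileSystem dfs =
                (DistributedFileSystem) FileSystem.get(URI.create("hdfs://localhost:40139"), conf);
            boolean recovered = dfs.recoverLease(wal); // ask the NameNode to start lease recovery
            while (!recovered) {
                Thread.sleep(1000L);                   // the log shows roughly 1 s between retries
                recovered = dfs.isFileClosed(wal);     // the call that fails once the client is closed
            }
        }
    }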
2024-11-15T11:48:56,164 INFO [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731671335807.eed6ab3436672cf1ecb2e52ea172410b. 2024-11-15T11:48:56,164 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=eed6ab3436672cf1ecb2e52ea172410b, regionState=OPEN, openSeqNum=2, regionLocation=7adf9b3d9d04,39085,1731671334464 2024-11-15T11:48:56,167 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure eed6ab3436672cf1ecb2e52ea172410b, server=7adf9b3d9d04,39085,1731671334464 because future has completed 2024-11-15T11:48:56,171 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-15T11:48:56,171 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure eed6ab3436672cf1ecb2e52ea172410b, server=7adf9b3d9d04,39085,1731671334464 in 175 msec 2024-11-15T11:48:56,174 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-15T11:48:56,174 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=eed6ab3436672cf1ecb2e52ea172410b, ASSIGN in 334 msec 2024-11-15T11:48:56,176 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-15T11:48:56,176 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731671336176"}]},"ts":"1731671336176"} 2024-11-15T11:48:56,178 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-15T11:48:56,180 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-15T11:48:56,182 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 373 msec 2024-11-15T11:48:56,379 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:56,379 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:57,092 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:57,093 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:57,093 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:57,093 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:57,094 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:57,094 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:57,095 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:57,095 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:57,123 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:57,123 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:57,123 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:57,123 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:57,123 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:57,124 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:57,128 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:57,128 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:57,128 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:57,131 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:57,380 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:57,380 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:57,637 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-15T11:48:57,638 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:57,639 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:57,639 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:57,640 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:57,640 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:57,640 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:57,641 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:57,642 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:57,666 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:57,667 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:57,667 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:57,667 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:57,668 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:57,668 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:57,672 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:57,672 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:57,672 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:57,675 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:48:58,380 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:58,380 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T11:48:59,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:48:59,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:49:00,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T11:49:00,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T11:49:00,847 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-15T11:49:00,848 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-15T11:49:01,017 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-15T11:49:01,017 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-15T11:49:01,017 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-15T11:49:01,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T11:49:01,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:49:02,383 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:49:02,383 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T11:49:03,384 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:49:03,384 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:49:04,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T11:49:04,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:49:05,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:49:05,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T11:49:05,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37705 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-15T11:49:05,912 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-11-15T11:49:05,912 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-11-15T11:49:05,916 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-11-15T11:49:05,916 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1731671335807.eed6ab3436672cf1ecb2e52ea172410b. 2024-11-15T11:49:05,919 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1731671335807.eed6ab3436672cf1ecb2e52ea172410b., hostname=7adf9b3d9d04,39085,1731671334464, seqNum=2] 2024-11-15T11:49:05,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39085 {}] regionserver.HRegion(8855): Flush requested on eed6ab3436672cf1ecb2e52ea172410b 2024-11-15T11:49:05,934 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing eed6ab3436672cf1ecb2e52ea172410b 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-15T11:49:05,953 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/.tmp/info/90f06126d0544c28ade0e9ea2b878c69 is 1080, key is row0001/info:/1731671345920/Put/seqid=0 2024-11-15T11:49:05,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741837_1013 (size=12509) 2024-11-15T11:49:05,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741837_1013 (size=12509) 2024-11-15T11:49:05,967 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/.tmp/info/90f06126d0544c28ade0e9ea2b878c69 2024-11-15T11:49:05,975 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/.tmp/info/90f06126d0544c28ade0e9ea2b878c69 as hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/90f06126d0544c28ade0e9ea2b878c69 2024-11-15T11:49:05,982 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/90f06126d0544c28ade0e9ea2b878c69, entries=7, sequenceid=11, filesize=12.2 K 
2024-11-15T11:49:05,983 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for eed6ab3436672cf1ecb2e52ea172410b in 48ms, sequenceid=11, compaction requested=false 2024-11-15T11:49:05,983 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for eed6ab3436672cf1ecb2e52ea172410b: 2024-11-15T11:49:05,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39085 {}] regionserver.HRegion(8855): Flush requested on eed6ab3436672cf1ecb2e52ea172410b 2024-11-15T11:49:05,984 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing eed6ab3436672cf1ecb2e52ea172410b 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-11-15T11:49:05,989 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/.tmp/info/5ba2d1ea19774f04becdca0e15feea2e is 1080, key is row0008/info:/1731671345936/Put/seqid=0 2024-11-15T11:49:05,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741838_1014 (size=29761) 2024-11-15T11:49:05,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741838_1014 (size=29761) 2024-11-15T11:49:05,994 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/.tmp/info/5ba2d1ea19774f04becdca0e15feea2e 2024-11-15T11:49:06,001 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/.tmp/info/5ba2d1ea19774f04becdca0e15feea2e as hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/5ba2d1ea19774f04becdca0e15feea2e 2024-11-15T11:49:06,006 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/5ba2d1ea19774f04becdca0e15feea2e, entries=23, sequenceid=37, filesize=29.1 K 2024-11-15T11:49:06,008 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for eed6ab3436672cf1ecb2e52ea172410b in 23ms, sequenceid=37, compaction requested=false 2024-11-15T11:49:06,008 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for eed6ab3436672cf1ecb2e52ea172410b: 2024-11-15T11:49:06,008 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=41.3 K, sizeToCheck=16.0 K 2024-11-15T11:49:06,008 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T11:49:06,008 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/5ba2d1ea19774f04becdca0e15feea2e because midkey is the same as first or last row 2024-11-15T11:49:06,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:49:06,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:49:06,520 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-15T11:49:06,522 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:49:06,522 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:49:06,522 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:49:06,522 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:49:06,523 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:49:06,523 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:49:06,524 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:49:06,524 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:49:06,552 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:49:06,552 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:49:06,553 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:49:06,553 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:49:06,553 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:49:06,554 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:49:06,558 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:49:06,558 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:49:06,558 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:49:06,561 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:49:07,387 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:49:07,387 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T11:49:07,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39085 {}] regionserver.HRegion(8855): Flush requested on eed6ab3436672cf1ecb2e52ea172410b 2024-11-15T11:49:08,000 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing eed6ab3436672cf1ecb2e52ea172410b 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-15T11:49:08,004 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/.tmp/info/1fc35b8bfb0a4cc085b35dec091fefa5 is 1080, key is row0031/info:/1731671345985/Put/seqid=0 2024-11-15T11:49:08,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741839_1015 (size=12509) 2024-11-15T11:49:08,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741839_1015 (size=12509) 2024-11-15T11:49:08,011 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/.tmp/info/1fc35b8bfb0a4cc085b35dec091fefa5 2024-11-15T11:49:08,019 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/.tmp/info/1fc35b8bfb0a4cc085b35dec091fefa5 as hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/1fc35b8bfb0a4cc085b35dec091fefa5 2024-11-15T11:49:08,025 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/1fc35b8bfb0a4cc085b35dec091fefa5, entries=7, sequenceid=47, filesize=12.2 K 2024-11-15T11:49:08,026 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for eed6ab3436672cf1ecb2e52ea172410b in 27ms, sequenceid=47, compaction requested=true 2024-11-15T11:49:08,026 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for eed6ab3436672cf1ecb2e52ea172410b: 2024-11-15T11:49:08,026 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=53.5 K, sizeToCheck=16.0 K 2024-11-15T11:49:08,026 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T11:49:08,026 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/5ba2d1ea19774f04becdca0e15feea2e because midkey is the same as first or last row 2024-11-15T11:49:08,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eed6ab3436672cf1ecb2e52ea172410b:info, priority=-2147483648, current under compaction store 
size is 1 2024-11-15T11:49:08,027 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T11:49:08,027 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-15T11:49:08,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39085 {}] regionserver.HRegion(8855): Flush requested on eed6ab3436672cf1ecb2e52ea172410b 2024-11-15T11:49:08,027 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing eed6ab3436672cf1ecb2e52ea172410b 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-15T11:49:08,028 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-15T11:49:08,028 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.HStore(1541): eed6ab3436672cf1ecb2e52ea172410b/info is initiating minor compaction (all files) 2024-11-15T11:49:08,028 INFO [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of eed6ab3436672cf1ecb2e52ea172410b/info in TestLogRolling-testLogRolling,,1731671335807.eed6ab3436672cf1ecb2e52ea172410b. 2024-11-15T11:49:08,028 INFO [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/90f06126d0544c28ade0e9ea2b878c69, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/5ba2d1ea19774f04becdca0e15feea2e, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/1fc35b8bfb0a4cc085b35dec091fefa5] into tmpdir=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/.tmp, totalSize=53.5 K 2024-11-15T11:49:08,028 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] compactions.Compactor(225): Compacting 90f06126d0544c28ade0e9ea2b878c69, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731671345920 2024-11-15T11:49:08,029 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5ba2d1ea19774f04becdca0e15feea2e, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1731671345936 2024-11-15T11:49:08,029 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1fc35b8bfb0a4cc085b35dec091fefa5, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1731671345985 2024-11-15T11:49:08,031 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/.tmp/info/ea078ca2917a4906b434e4965b312115 is 1080, key is row0038/info:/1731671348001/Put/seqid=0 
2024-11-15T11:49:08,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741840_1016 (size=17894) 2024-11-15T11:49:08,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741840_1016 (size=17894) 2024-11-15T11:49:08,047 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=62 (bloomFilter=true), to=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/.tmp/info/ea078ca2917a4906b434e4965b312115 2024-11-15T11:49:08,056 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/.tmp/info/ea078ca2917a4906b434e4965b312115 as hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/ea078ca2917a4906b434e4965b312115 2024-11-15T11:49:08,058 INFO [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): eed6ab3436672cf1ecb2e52ea172410b#info#compaction#60 average throughput is 18.98 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-15T11:49:08,059 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/.tmp/info/3e190723c1c44326a82e85dc3d25b14e is 1080, key is row0001/info:/1731671345920/Put/seqid=0 2024-11-15T11:49:08,063 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/ea078ca2917a4906b434e4965b312115, entries=12, sequenceid=62, filesize=17.5 K 2024-11-15T11:49:08,064 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=15.76 KB/16140 for eed6ab3436672cf1ecb2e52ea172410b in 37ms, sequenceid=62, compaction requested=false 2024-11-15T11:49:08,064 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for eed6ab3436672cf1ecb2e52ea172410b: 2024-11-15T11:49:08,064 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=71.0 K, sizeToCheck=16.0 K 2024-11-15T11:49:08,064 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T11:49:08,064 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/5ba2d1ea19774f04becdca0e15feea2e because midkey is the same as first or last row 2024-11-15T11:49:08,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741841_1017 (size=44978) 2024-11-15T11:49:08,065 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741841_1017 (size=44978) 2024-11-15T11:49:08,071 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/.tmp/info/3e190723c1c44326a82e85dc3d25b14e as hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/3e190723c1c44326a82e85dc3d25b14e 2024-11-15T11:49:08,077 INFO [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in eed6ab3436672cf1ecb2e52ea172410b/info of eed6ab3436672cf1ecb2e52ea172410b into 3e190723c1c44326a82e85dc3d25b14e(size=43.9 K), total size for store is 61.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-15T11:49:08,077 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for eed6ab3436672cf1ecb2e52ea172410b: 2024-11-15T11:49:08,077 INFO [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731671335807.eed6ab3436672cf1ecb2e52ea172410b., storeName=eed6ab3436672cf1ecb2e52ea172410b/info, priority=13, startTime=1731671348026; duration=0sec 2024-11-15T11:49:08,077 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=61.4 K, sizeToCheck=16.0 K 2024-11-15T11:49:08,077 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T11:49:08,078 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/3e190723c1c44326a82e85dc3d25b14e because midkey is the same as first or last row 2024-11-15T11:49:08,078 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=61.4 K, sizeToCheck=16.0 K 2024-11-15T11:49:08,078 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T11:49:08,078 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/3e190723c1c44326a82e85dc3d25b14e because midkey is the same as first or last row 2024-11-15T11:49:08,078 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=61.4 K, sizeToCheck=16.0 K 2024-11-15T11:49:08,078 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T11:49:08,078 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/3e190723c1c44326a82e85dc3d25b14e because midkey is the same as first or last row 2024-11-15T11:49:08,078 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T11:49:08,078 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eed6ab3436672cf1ecb2e52ea172410b:info 2024-11-15T11:49:08,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:49:08,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:49:09,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:49:09,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T11:49:10,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39085 {}] regionserver.HRegion(8855): Flush requested on eed6ab3436672cf1ecb2e52ea172410b 2024-11-15T11:49:10,064 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing eed6ab3436672cf1ecb2e52ea172410b 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-11-15T11:49:10,071 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/.tmp/info/177b34efcc0740379e6c3702d6c5cd21 is 1080, key is row0050/info:/1731671348028/Put/seqid=0 2024-11-15T11:49:10,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741842_1018 (size=22222) 2024-11-15T11:49:10,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741842_1018 (size=22222) 2024-11-15T11:49:10,082 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/.tmp/info/177b34efcc0740379e6c3702d6c5cd21 2024-11-15T11:49:10,088 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/.tmp/info/177b34efcc0740379e6c3702d6c5cd21 as hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/177b34efcc0740379e6c3702d6c5cd21 2024-11-15T11:49:10,093 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/177b34efcc0740379e6c3702d6c5cd21, entries=16, sequenceid=82, filesize=21.7 K 2024-11-15T11:49:10,094 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=13.66 KB/13988 for eed6ab3436672cf1ecb2e52ea172410b in 30ms, sequenceid=82, compaction requested=true 2024-11-15T11:49:10,094 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for eed6ab3436672cf1ecb2e52ea172410b: 2024-11-15T11:49:10,094 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=83.1 K, sizeToCheck=16.0 K 2024-11-15T11:49:10,094 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T11:49:10,095 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/3e190723c1c44326a82e85dc3d25b14e because midkey is the same as first or last row 2024-11-15T11:49:10,095 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eed6ab3436672cf1ecb2e52ea172410b:info, priority=-2147483648, current under 
compaction store size is 1 2024-11-15T11:49:10,095 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T11:49:10,095 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-15T11:49:10,096 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85094 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-15T11:49:10,096 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.HStore(1541): eed6ab3436672cf1ecb2e52ea172410b/info is initiating minor compaction (all files) 2024-11-15T11:49:10,096 INFO [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of eed6ab3436672cf1ecb2e52ea172410b/info in TestLogRolling-testLogRolling,,1731671335807.eed6ab3436672cf1ecb2e52ea172410b. 2024-11-15T11:49:10,096 INFO [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/3e190723c1c44326a82e85dc3d25b14e, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/ea078ca2917a4906b434e4965b312115, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/177b34efcc0740379e6c3702d6c5cd21] into tmpdir=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/.tmp, totalSize=83.1 K 2024-11-15T11:49:10,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39085 {}] regionserver.HRegion(8855): Flush requested on eed6ab3436672cf1ecb2e52ea172410b 2024-11-15T11:49:10,097 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing eed6ab3436672cf1ecb2e52ea172410b 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-15T11:49:10,097 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3e190723c1c44326a82e85dc3d25b14e, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1731671345920 2024-11-15T11:49:10,097 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] compactions.Compactor(225): Compacting ea078ca2917a4906b434e4965b312115, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=62, earliestPutTs=1731671348001 2024-11-15T11:49:10,097 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] compactions.Compactor(225): Compacting 177b34efcc0740379e6c3702d6c5cd21, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1731671348028 2024-11-15T11:49:10,103 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/.tmp/info/e6f26f241d4d45eea80ebf2a2a5cc0ce is 1080, key is 
row0066/info:/1731671350066/Put/seqid=0 2024-11-15T11:49:10,113 INFO [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): eed6ab3436672cf1ecb2e52ea172410b#info#compaction#63 average throughput is 33.35 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-15T11:49:10,114 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/.tmp/info/0ffb8896242d44308d52fe37cf3f67bd is 1080, key is row0001/info:/1731671345920/Put/seqid=0 2024-11-15T11:49:10,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741843_1019 (size=20064) 2024-11-15T11:49:10,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741843_1019 (size=20064) 2024-11-15T11:49:10,116 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=99 (bloomFilter=true), to=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/.tmp/info/e6f26f241d4d45eea80ebf2a2a5cc0ce 2024-11-15T11:49:10,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741844_1020 (size=75378) 2024-11-15T11:49:10,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741844_1020 (size=75378) 2024-11-15T11:49:10,124 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/.tmp/info/e6f26f241d4d45eea80ebf2a2a5cc0ce as hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/e6f26f241d4d45eea80ebf2a2a5cc0ce 2024-11-15T11:49:10,130 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/e6f26f241d4d45eea80ebf2a2a5cc0ce, entries=14, sequenceid=99, filesize=19.6 K 2024-11-15T11:49:10,131 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=12.61 KB/12912 for eed6ab3436672cf1ecb2e52ea172410b in 35ms, sequenceid=99, compaction requested=false 2024-11-15T11:49:10,131 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for eed6ab3436672cf1ecb2e52ea172410b: 2024-11-15T11:49:10,131 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=102.7 K, sizeToCheck=16.0 K 2024-11-15T11:49:10,131 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T11:49:10,131 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/3e190723c1c44326a82e85dc3d25b14e because midkey is the same as first or last row 2024-11-15T11:49:10,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39085 {}] regionserver.HRegion(8855): Flush requested on eed6ab3436672cf1ecb2e52ea172410b 2024-11-15T11:49:10,132 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing eed6ab3436672cf1ecb2e52ea172410b 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-15T11:49:10,136 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/.tmp/info/95bacbe698c64d3f96640d0e6d0ca5e4 is 1080, key is row0080/info:/1731671350098/Put/seqid=0 2024-11-15T11:49:10,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741845_1021 (size=18987) 2024-11-15T11:49:10,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741845_1021 (size=18987) 2024-11-15T11:49:10,142 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/.tmp/info/95bacbe698c64d3f96640d0e6d0ca5e4 2024-11-15T11:49:10,147 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/.tmp/info/95bacbe698c64d3f96640d0e6d0ca5e4 as hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/95bacbe698c64d3f96640d0e6d0ca5e4 2024-11-15T11:49:10,153 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/95bacbe698c64d3f96640d0e6d0ca5e4, entries=13, sequenceid=115, filesize=18.5 K 2024-11-15T11:49:10,154 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=4.20 KB/4304 for eed6ab3436672cf1ecb2e52ea172410b in 22ms, sequenceid=115, compaction requested=false 2024-11-15T11:49:10,154 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for eed6ab3436672cf1ecb2e52ea172410b: 2024-11-15T11:49:10,154 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=121.2 K, sizeToCheck=16.0 K 2024-11-15T11:49:10,154 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T11:49:10,154 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/3e190723c1c44326a82e85dc3d25b14e 
because midkey is the same as first or last row 2024-11-15T11:49:10,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:49:10,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:49:10,529 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/.tmp/info/0ffb8896242d44308d52fe37cf3f67bd as hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/0ffb8896242d44308d52fe37cf3f67bd 2024-11-15T11:49:10,537 INFO [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in eed6ab3436672cf1ecb2e52ea172410b/info of eed6ab3436672cf1ecb2e52ea172410b into 0ffb8896242d44308d52fe37cf3f67bd(size=73.6 K), total size for store is 111.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-15T11:49:10,537 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for eed6ab3436672cf1ecb2e52ea172410b: 2024-11-15T11:49:10,537 INFO [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731671335807.eed6ab3436672cf1ecb2e52ea172410b., storeName=eed6ab3436672cf1ecb2e52ea172410b/info, priority=13, startTime=1731671350095; duration=0sec 2024-11-15T11:49:10,537 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=111.7 K, sizeToCheck=16.0 K 2024-11-15T11:49:10,538 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T11:49:10,538 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=111.7 K, sizeToCheck=16.0 K 2024-11-15T11:49:10,538 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T11:49:10,538 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=111.7 K, sizeToCheck=16.0 K 2024-11-15T11:49:10,538 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T11:49:10,539 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1731671335807.eed6ab3436672cf1ecb2e52ea172410b., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T11:49:10,539 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T11:49:10,539 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eed6ab3436672cf1ecb2e52ea172410b:info 2024-11-15T11:49:10,540 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37705 {}] assignment.AssignmentManager(1363): Split request from 7adf9b3d9d04,39085,1731671334464, parent={ENCODED => eed6ab3436672cf1ecb2e52ea172410b, NAME => 'TestLogRolling-testLogRolling,,1731671335807.eed6ab3436672cf1ecb2e52ea172410b.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-15T11:49:10,546 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37705 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=7adf9b3d9d04,39085,1731671334464 2024-11-15T11:49:10,549 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37705 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=eed6ab3436672cf1ecb2e52ea172410b, daughterA=54d9b36c4ac742307d7d87ca14264576, daughterB=49e5023b322e0a73b13e6bdd2847ca14 2024-11-15T11:49:10,551 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=eed6ab3436672cf1ecb2e52ea172410b, 
daughterA=54d9b36c4ac742307d7d87ca14264576, daughterB=49e5023b322e0a73b13e6bdd2847ca14 2024-11-15T11:49:10,551 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=eed6ab3436672cf1ecb2e52ea172410b, daughterA=54d9b36c4ac742307d7d87ca14264576, daughterB=49e5023b322e0a73b13e6bdd2847ca14 2024-11-15T11:49:10,551 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=eed6ab3436672cf1ecb2e52ea172410b, daughterA=54d9b36c4ac742307d7d87ca14264576, daughterB=49e5023b322e0a73b13e6bdd2847ca14 2024-11-15T11:49:10,558 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=eed6ab3436672cf1ecb2e52ea172410b, UNASSIGN}] 2024-11-15T11:49:10,559 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=eed6ab3436672cf1ecb2e52ea172410b, UNASSIGN 2024-11-15T11:49:10,561 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=eed6ab3436672cf1ecb2e52ea172410b, regionState=CLOSING, regionLocation=7adf9b3d9d04,39085,1731671334464 2024-11-15T11:49:10,563 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=eed6ab3436672cf1ecb2e52ea172410b, UNASSIGN because future has completed 2024-11-15T11:49:10,563 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-15T11:49:10,564 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure eed6ab3436672cf1ecb2e52ea172410b, server=7adf9b3d9d04,39085,1731671334464}] 2024-11-15T11:49:10,721 INFO [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close eed6ab3436672cf1ecb2e52ea172410b 2024-11-15T11:49:10,721 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-15T11:49:10,722 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing eed6ab3436672cf1ecb2e52ea172410b, disabling compactions & flushes 2024-11-15T11:49:10,722 INFO [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731671335807.eed6ab3436672cf1ecb2e52ea172410b. 2024-11-15T11:49:10,722 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731671335807.eed6ab3436672cf1ecb2e52ea172410b. 
2024-11-15T11:49:10,722 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731671335807.eed6ab3436672cf1ecb2e52ea172410b. after waiting 0 ms 2024-11-15T11:49:10,722 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731671335807.eed6ab3436672cf1ecb2e52ea172410b. 2024-11-15T11:49:10,722 INFO [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing eed6ab3436672cf1ecb2e52ea172410b 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-15T11:49:10,752 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/.tmp/info/c65888852b6547089311f91929c56cd3 is 1080, key is row0093/info:/1731671350133/Put/seqid=0 2024-11-15T11:49:10,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741846_1022 (size=9270) 2024-11-15T11:49:10,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741846_1022 (size=9270) 2024-11-15T11:49:10,759 INFO [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=123 (bloomFilter=true), to=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/.tmp/info/c65888852b6547089311f91929c56cd3 2024-11-15T11:49:10,766 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/.tmp/info/c65888852b6547089311f91929c56cd3 as hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/c65888852b6547089311f91929c56cd3 2024-11-15T11:49:10,773 INFO [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/c65888852b6547089311f91929c56cd3, entries=4, sequenceid=123, filesize=9.1 K 2024-11-15T11:49:10,774 INFO [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for eed6ab3436672cf1ecb2e52ea172410b in 52ms, sequenceid=123, compaction requested=true 2024-11-15T11:49:10,775 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731671335807.eed6ab3436672cf1ecb2e52ea172410b.-1 {}] regionserver.HStore(2317): Moving the files 
[hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/90f06126d0544c28ade0e9ea2b878c69, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/5ba2d1ea19774f04becdca0e15feea2e, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/3e190723c1c44326a82e85dc3d25b14e, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/1fc35b8bfb0a4cc085b35dec091fefa5, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/ea078ca2917a4906b434e4965b312115, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/177b34efcc0740379e6c3702d6c5cd21] to archive 2024-11-15T11:49:10,776 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731671335807.eed6ab3436672cf1ecb2e52ea172410b.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-15T11:49:10,781 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731671335807.eed6ab3436672cf1ecb2e52ea172410b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/90f06126d0544c28ade0e9ea2b878c69 to hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/archive/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/90f06126d0544c28ade0e9ea2b878c69 2024-11-15T11:49:10,782 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731671335807.eed6ab3436672cf1ecb2e52ea172410b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/5ba2d1ea19774f04becdca0e15feea2e to hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/archive/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/5ba2d1ea19774f04becdca0e15feea2e 2024-11-15T11:49:10,783 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731671335807.eed6ab3436672cf1ecb2e52ea172410b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/3e190723c1c44326a82e85dc3d25b14e to hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/archive/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/3e190723c1c44326a82e85dc3d25b14e 2024-11-15T11:49:10,784 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731671335807.eed6ab3436672cf1ecb2e52ea172410b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/1fc35b8bfb0a4cc085b35dec091fefa5 to 
hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/archive/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/1fc35b8bfb0a4cc085b35dec091fefa5 2024-11-15T11:49:10,786 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731671335807.eed6ab3436672cf1ecb2e52ea172410b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/ea078ca2917a4906b434e4965b312115 to hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/archive/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/ea078ca2917a4906b434e4965b312115 2024-11-15T11:49:10,787 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731671335807.eed6ab3436672cf1ecb2e52ea172410b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/177b34efcc0740379e6c3702d6c5cd21 to hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/archive/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/177b34efcc0740379e6c3702d6c5cd21 2024-11-15T11:49:10,793 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=1 2024-11-15T11:49:10,794 INFO [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731671335807.eed6ab3436672cf1ecb2e52ea172410b. 2024-11-15T11:49:10,795 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for eed6ab3436672cf1ecb2e52ea172410b: Waiting for close lock at 1731671350722Running coprocessor pre-close hooks at 1731671350722Disabling compacts and flushes for region at 1731671350722Disabling writes for close at 1731671350722Obtaining lock to block concurrent updates at 1731671350722Preparing flush snapshotting stores in eed6ab3436672cf1ecb2e52ea172410b at 1731671350722Finished memstore snapshotting TestLogRolling-testLogRolling,,1731671335807.eed6ab3436672cf1ecb2e52ea172410b., syncing WAL and waiting on mvcc, flushsize=dataSize=4304, getHeapSize=4848, getOffHeapSize=0, getCellsCount=4 at 1731671350723 (+1 ms)Flushing stores of TestLogRolling-testLogRolling,,1731671335807.eed6ab3436672cf1ecb2e52ea172410b. 
at 1731671350748 (+25 ms)Flushing eed6ab3436672cf1ecb2e52ea172410b/info: creating writer at 1731671350749 (+1 ms)Flushing eed6ab3436672cf1ecb2e52ea172410b/info: appending metadata at 1731671350752 (+3 ms)Flushing eed6ab3436672cf1ecb2e52ea172410b/info: closing flushed file at 1731671350752Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4faf4ea9: reopening flushed file at 1731671350765 (+13 ms)Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for eed6ab3436672cf1ecb2e52ea172410b in 52ms, sequenceid=123, compaction requested=true at 1731671350774 (+9 ms)Writing region close event to WAL at 1731671350789 (+15 ms)Running coprocessor post-close hooks at 1731671350794 (+5 ms)Closed at 1731671350794 2024-11-15T11:49:10,797 INFO [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed eed6ab3436672cf1ecb2e52ea172410b 2024-11-15T11:49:10,798 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=eed6ab3436672cf1ecb2e52ea172410b, regionState=CLOSED 2024-11-15T11:49:10,800 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure eed6ab3436672cf1ecb2e52ea172410b, server=7adf9b3d9d04,39085,1731671334464 because future has completed 2024-11-15T11:49:10,805 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-15T11:49:10,805 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure eed6ab3436672cf1ecb2e52ea172410b, server=7adf9b3d9d04,39085,1731671334464 in 238 msec 2024-11-15T11:49:10,807 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-15T11:49:10,807 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=eed6ab3436672cf1ecb2e52ea172410b, UNASSIGN in 247 msec 2024-11-15T11:49:10,815 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:49:10,821 INFO [PEWorker-3 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 4 storefiles, region=eed6ab3436672cf1ecb2e52ea172410b, threads=4 2024-11-15T11:49:10,824 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/0ffb8896242d44308d52fe37cf3f67bd for region: eed6ab3436672cf1ecb2e52ea172410b 2024-11-15T11:49:10,824 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/95bacbe698c64d3f96640d0e6d0ca5e4 for region: eed6ab3436672cf1ecb2e52ea172410b 2024-11-15T11:49:10,828 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: 
hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/c65888852b6547089311f91929c56cd3 for region: eed6ab3436672cf1ecb2e52ea172410b 2024-11-15T11:49:10,831 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/e6f26f241d4d45eea80ebf2a2a5cc0ce for region: eed6ab3436672cf1ecb2e52ea172410b 2024-11-15T11:49:10,839 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/95bacbe698c64d3f96640d0e6d0ca5e4, top=true 2024-11-15T11:49:10,842 DEBUG [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/c65888852b6547089311f91929c56cd3, top=true 2024-11-15T11:49:10,846 DEBUG [StoreFileSplitter-pool-3 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/e6f26f241d4d45eea80ebf2a2a5cc0ce, top=true 2024-11-15T11:49:10,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741847_1023 (size=27) 2024-11-15T11:49:10,852 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/TestLogRolling-testLogRolling=eed6ab3436672cf1ecb2e52ea172410b-95bacbe698c64d3f96640d0e6d0ca5e4 for child: 49e5023b322e0a73b13e6bdd2847ca14, parent: eed6ab3436672cf1ecb2e52ea172410b 2024-11-15T11:49:10,852 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/95bacbe698c64d3f96640d0e6d0ca5e4 for region: eed6ab3436672cf1ecb2e52ea172410b 2024-11-15T11:49:10,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741847_1023 (size=27) 2024-11-15T11:49:10,864 INFO [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/TestLogRolling-testLogRolling=eed6ab3436672cf1ecb2e52ea172410b-c65888852b6547089311f91929c56cd3 for child: 49e5023b322e0a73b13e6bdd2847ca14, parent: eed6ab3436672cf1ecb2e52ea172410b 2024-11-15T11:49:10,864 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: 
hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/c65888852b6547089311f91929c56cd3 for region: eed6ab3436672cf1ecb2e52ea172410b 2024-11-15T11:49:10,869 INFO [StoreFileSplitter-pool-3 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/TestLogRolling-testLogRolling=eed6ab3436672cf1ecb2e52ea172410b-e6f26f241d4d45eea80ebf2a2a5cc0ce for child: 49e5023b322e0a73b13e6bdd2847ca14, parent: eed6ab3436672cf1ecb2e52ea172410b 2024-11-15T11:49:10,869 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/e6f26f241d4d45eea80ebf2a2a5cc0ce for region: eed6ab3436672cf1ecb2e52ea172410b 2024-11-15T11:49:10,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741848_1024 (size=27) 2024-11-15T11:49:10,878 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/0ffb8896242d44308d52fe37cf3f67bd for region: eed6ab3436672cf1ecb2e52ea172410b 2024-11-15T11:49:10,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741848_1024 (size=27) 2024-11-15T11:49:10,881 DEBUG [PEWorker-3 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region eed6ab3436672cf1ecb2e52ea172410b Daughter A: [hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/54d9b36c4ac742307d7d87ca14264576/info/0ffb8896242d44308d52fe37cf3f67bd.eed6ab3436672cf1ecb2e52ea172410b] storefiles, Daughter B: [hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/0ffb8896242d44308d52fe37cf3f67bd.eed6ab3436672cf1ecb2e52ea172410b, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/TestLogRolling-testLogRolling=eed6ab3436672cf1ecb2e52ea172410b-95bacbe698c64d3f96640d0e6d0ca5e4, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/TestLogRolling-testLogRolling=eed6ab3436672cf1ecb2e52ea172410b-c65888852b6547089311f91929c56cd3, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/TestLogRolling-testLogRolling=eed6ab3436672cf1ecb2e52ea172410b-e6f26f241d4d45eea80ebf2a2a5cc0ce] storefiles. 
2024-11-15T11:49:10,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741849_1025 (size=71) 2024-11-15T11:49:10,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741849_1025 (size=71) 2024-11-15T11:49:10,894 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:49:10,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741850_1026 (size=71) 2024-11-15T11:49:10,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741850_1026 (size=71) 2024-11-15T11:49:10,911 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:49:10,928 DEBUG [PEWorker-3 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/54d9b36c4ac742307d7d87ca14264576/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=-1 2024-11-15T11:49:10,932 DEBUG [PEWorker-3 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=-1 2024-11-15T11:49:10,935 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731671335807.eed6ab3436672cf1ecb2e52ea172410b.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731671350934"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1731671350934"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1731671350934"}]},"ts":"1731671350934"} 2024-11-15T11:49:10,935 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731671350546.54d9b36c4ac742307d7d87ca14264576.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731671350934"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731671350934"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731671350934"}]},"ts":"1731671350934"} 2024-11-15T11:49:10,935 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731671350934"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731671350934"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731671350934"}]},"ts":"1731671350934"} 2024-11-15T11:49:10,956 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=54d9b36c4ac742307d7d87ca14264576, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, 
region=49e5023b322e0a73b13e6bdd2847ca14, ASSIGN}] 2024-11-15T11:49:10,958 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=54d9b36c4ac742307d7d87ca14264576, ASSIGN 2024-11-15T11:49:10,958 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=49e5023b322e0a73b13e6bdd2847ca14, ASSIGN 2024-11-15T11:49:10,959 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=54d9b36c4ac742307d7d87ca14264576, ASSIGN; state=SPLITTING_NEW, location=7adf9b3d9d04,39085,1731671334464; forceNewPlan=false, retain=false 2024-11-15T11:49:10,959 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=49e5023b322e0a73b13e6bdd2847ca14, ASSIGN; state=SPLITTING_NEW, location=7adf9b3d9d04,39085,1731671334464; forceNewPlan=false, retain=false 2024-11-15T11:49:11,016 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-15T11:49:11,110 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=54d9b36c4ac742307d7d87ca14264576, regionState=OPENING, regionLocation=7adf9b3d9d04,39085,1731671334464 2024-11-15T11:49:11,110 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=49e5023b322e0a73b13e6bdd2847ca14, regionState=OPENING, regionLocation=7adf9b3d9d04,39085,1731671334464 2024-11-15T11:49:11,113 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=54d9b36c4ac742307d7d87ca14264576, ASSIGN because future has completed 2024-11-15T11:49:11,113 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 54d9b36c4ac742307d7d87ca14264576, server=7adf9b3d9d04,39085,1731671334464}] 2024-11-15T11:49:11,114 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=49e5023b322e0a73b13e6bdd2847ca14, ASSIGN because future has completed 2024-11-15T11:49:11,115 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 49e5023b322e0a73b13e6bdd2847ca14, server=7adf9b3d9d04,39085,1731671334464}] 2024-11-15T11:49:11,270 INFO [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open 
TestLogRolling-testLogRolling,,1731671350546.54d9b36c4ac742307d7d87ca14264576. 2024-11-15T11:49:11,270 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 54d9b36c4ac742307d7d87ca14264576, NAME => 'TestLogRolling-testLogRolling,,1731671350546.54d9b36c4ac742307d7d87ca14264576.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-15T11:49:11,270 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 54d9b36c4ac742307d7d87ca14264576 2024-11-15T11:49:11,270 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731671350546.54d9b36c4ac742307d7d87ca14264576.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:49:11,270 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 54d9b36c4ac742307d7d87ca14264576 2024-11-15T11:49:11,270 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 54d9b36c4ac742307d7d87ca14264576 2024-11-15T11:49:11,272 INFO [StoreOpener-54d9b36c4ac742307d7d87ca14264576-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 54d9b36c4ac742307d7d87ca14264576 2024-11-15T11:49:11,272 INFO [StoreOpener-54d9b36c4ac742307d7d87ca14264576-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 54d9b36c4ac742307d7d87ca14264576 columnFamilyName info 2024-11-15T11:49:11,272 DEBUG [StoreOpener-54d9b36c4ac742307d7d87ca14264576-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:49:11,285 DEBUG [StoreOpener-54d9b36c4ac742307d7d87ca14264576-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/54d9b36c4ac742307d7d87ca14264576/info/0ffb8896242d44308d52fe37cf3f67bd.eed6ab3436672cf1ecb2e52ea172410b->hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/0ffb8896242d44308d52fe37cf3f67bd-bottom 2024-11-15T11:49:11,286 INFO [StoreOpener-54d9b36c4ac742307d7d87ca14264576-1 {}] regionserver.HStore(327): Store=54d9b36c4ac742307d7d87ca14264576/info, 
memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:49:11,286 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 54d9b36c4ac742307d7d87ca14264576 2024-11-15T11:49:11,287 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/54d9b36c4ac742307d7d87ca14264576 2024-11-15T11:49:11,288 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/54d9b36c4ac742307d7d87ca14264576 2024-11-15T11:49:11,289 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 54d9b36c4ac742307d7d87ca14264576 2024-11-15T11:49:11,289 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 54d9b36c4ac742307d7d87ca14264576 2024-11-15T11:49:11,291 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 54d9b36c4ac742307d7d87ca14264576 2024-11-15T11:49:11,293 INFO [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 54d9b36c4ac742307d7d87ca14264576; next sequenceid=127; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=856629, jitterRate=0.08926014602184296}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T11:49:11,293 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 54d9b36c4ac742307d7d87ca14264576 2024-11-15T11:49:11,294 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 54d9b36c4ac742307d7d87ca14264576: Running coprocessor pre-open hook at 1731671351271Writing region info on filesystem at 1731671351271Initializing all the Stores at 1731671351271Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731671351271Cleaning up temporary data from old regions at 1731671351289 (+18 ms)Running coprocessor post-open hooks at 1731671351293 (+4 ms)Region opened successfully at 1731671351294 (+1 ms) 2024-11-15T11:49:11,295 INFO [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731671350546.54d9b36c4ac742307d7d87ca14264576., pid=12, masterSystemTime=1731671351266 2024-11-15T11:49:11,295 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, 
pid=12}] regionserver.CompactSplit(403): Add compact mark for store 54d9b36c4ac742307d7d87ca14264576:info, priority=-2147483648, current under compaction store size is 1 2024-11-15T11:49:11,295 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-15T11:49:11,295 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T11:49:11,296 INFO [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1731671350546.54d9b36c4ac742307d7d87ca14264576. 2024-11-15T11:49:11,296 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.HStore(1541): 54d9b36c4ac742307d7d87ca14264576/info is initiating minor compaction (all files) 2024-11-15T11:49:11,296 INFO [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 54d9b36c4ac742307d7d87ca14264576/info in TestLogRolling-testLogRolling,,1731671350546.54d9b36c4ac742307d7d87ca14264576. 2024-11-15T11:49:11,296 INFO [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/54d9b36c4ac742307d7d87ca14264576/info/0ffb8896242d44308d52fe37cf3f67bd.eed6ab3436672cf1ecb2e52ea172410b->hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/0ffb8896242d44308d52fe37cf3f67bd-bottom] into tmpdir=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/54d9b36c4ac742307d7d87ca14264576/.tmp, totalSize=73.6 K 2024-11-15T11:49:11,297 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0ffb8896242d44308d52fe37cf3f67bd.eed6ab3436672cf1ecb2e52ea172410b, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1731671345920 2024-11-15T11:49:11,298 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731671350546.54d9b36c4ac742307d7d87ca14264576. 2024-11-15T11:49:11,298 INFO [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731671350546.54d9b36c4ac742307d7d87ca14264576. 2024-11-15T11:49:11,298 INFO [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14. 
2024-11-15T11:49:11,299 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=54d9b36c4ac742307d7d87ca14264576, regionState=OPEN, openSeqNum=127, regionLocation=7adf9b3d9d04,39085,1731671334464 2024-11-15T11:49:11,299 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 49e5023b322e0a73b13e6bdd2847ca14, NAME => 'TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-15T11:49:11,299 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 49e5023b322e0a73b13e6bdd2847ca14 2024-11-15T11:49:11,299 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:49:11,299 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 49e5023b322e0a73b13e6bdd2847ca14 2024-11-15T11:49:11,299 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 49e5023b322e0a73b13e6bdd2847ca14 2024-11-15T11:49:11,301 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39085 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-15T11:49:11,302 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
2024-11-15T11:49:11,302 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.11 KB heapSize=8.96 KB 2024-11-15T11:49:11,302 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 54d9b36c4ac742307d7d87ca14264576, server=7adf9b3d9d04,39085,1731671334464 because future has completed 2024-11-15T11:49:11,305 INFO [StoreOpener-49e5023b322e0a73b13e6bdd2847ca14-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 49e5023b322e0a73b13e6bdd2847ca14 2024-11-15T11:49:11,307 INFO [StoreOpener-49e5023b322e0a73b13e6bdd2847ca14-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 49e5023b322e0a73b13e6bdd2847ca14 columnFamilyName info 2024-11-15T11:49:11,307 DEBUG [StoreOpener-49e5023b322e0a73b13e6bdd2847ca14-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:49:11,308 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=10 2024-11-15T11:49:11,308 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 54d9b36c4ac742307d7d87ca14264576, server=7adf9b3d9d04,39085,1731671334464 in 191 msec 2024-11-15T11:49:11,309 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=54d9b36c4ac742307d7d87ca14264576, ASSIGN in 352 msec 2024-11-15T11:49:11,324 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/hbase/meta/1588230740/.tmp/info/d6cbdd7f6c134434b3240d99ee1f7334 is 193, key is TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14./info:regioninfo/1731671351110/Put/seqid=0 2024-11-15T11:49:11,324 DEBUG [StoreOpener-49e5023b322e0a73b13e6bdd2847ca14-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/0ffb8896242d44308d52fe37cf3f67bd.eed6ab3436672cf1ecb2e52ea172410b->hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/0ffb8896242d44308d52fe37cf3f67bd-top 2024-11-15T11:49:11,330 DEBUG [StoreOpener-49e5023b322e0a73b13e6bdd2847ca14-1 
{}] regionserver.StoreEngine(278): loaded hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/TestLogRolling-testLogRolling=eed6ab3436672cf1ecb2e52ea172410b-95bacbe698c64d3f96640d0e6d0ca5e4 2024-11-15T11:49:11,334 INFO [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 54d9b36c4ac742307d7d87ca14264576#info#compaction#67 average throughput is 20.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-15T11:49:11,334 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/54d9b36c4ac742307d7d87ca14264576/.tmp/info/2dcb1d350c9343aa9aafc425551059e6 is 1080, key is row0001/info:/1731671345920/Put/seqid=0 2024-11-15T11:49:11,336 DEBUG [StoreOpener-49e5023b322e0a73b13e6bdd2847ca14-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/TestLogRolling-testLogRolling=eed6ab3436672cf1ecb2e52ea172410b-c65888852b6547089311f91929c56cd3 2024-11-15T11:49:11,341 DEBUG [StoreOpener-49e5023b322e0a73b13e6bdd2847ca14-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/TestLogRolling-testLogRolling=eed6ab3436672cf1ecb2e52ea172410b-e6f26f241d4d45eea80ebf2a2a5cc0ce 2024-11-15T11:49:11,341 INFO [StoreOpener-49e5023b322e0a73b13e6bdd2847ca14-1 {}] regionserver.HStore(327): Store=49e5023b322e0a73b13e6bdd2847ca14/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:49:11,341 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 49e5023b322e0a73b13e6bdd2847ca14 2024-11-15T11:49:11,342 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14 2024-11-15T11:49:11,343 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14 2024-11-15T11:49:11,344 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 49e5023b322e0a73b13e6bdd2847ca14 2024-11-15T11:49:11,344 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 49e5023b322e0a73b13e6bdd2847ca14 2024-11-15T11:49:11,347 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 49e5023b322e0a73b13e6bdd2847ca14 
2024-11-15T11:49:11,347 INFO [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened 49e5023b322e0a73b13e6bdd2847ca14; next sequenceid=127; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=866788, jitterRate=0.10217870771884918}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T11:49:11,348 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 49e5023b322e0a73b13e6bdd2847ca14 2024-11-15T11:49:11,348 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for 49e5023b322e0a73b13e6bdd2847ca14: Running coprocessor pre-open hook at 1731671351299Writing region info on filesystem at 1731671351299Initializing all the Stores at 1731671351303 (+4 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731671351303Cleaning up temporary data from old regions at 1731671351344 (+41 ms)Running coprocessor post-open hooks at 1731671351348 (+4 ms)Region opened successfully at 1731671351348 2024-11-15T11:49:11,348 INFO [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14., pid=13, masterSystemTime=1731671351266 2024-11-15T11:49:11,349 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 49e5023b322e0a73b13e6bdd2847ca14:info, priority=-2147483648, current under compaction store size is 2 2024-11-15T11:49:11,349 DEBUG [RS:0;7adf9b3d9d04:39085-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-15T11:49:11,349 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T11:49:11,350 INFO [RS:0;7adf9b3d9d04:39085-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14. 2024-11-15T11:49:11,350 DEBUG [RS:0;7adf9b3d9d04:39085-longCompactions-0 {}] regionserver.HStore(1541): 49e5023b322e0a73b13e6bdd2847ca14/info is initiating minor compaction (all files) 2024-11-15T11:49:11,350 INFO [RS:0;7adf9b3d9d04:39085-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 49e5023b322e0a73b13e6bdd2847ca14/info in TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14. 
2024-11-15T11:49:11,351 INFO [RS:0;7adf9b3d9d04:39085-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/0ffb8896242d44308d52fe37cf3f67bd.eed6ab3436672cf1ecb2e52ea172410b->hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/0ffb8896242d44308d52fe37cf3f67bd-top, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/TestLogRolling-testLogRolling=eed6ab3436672cf1ecb2e52ea172410b-e6f26f241d4d45eea80ebf2a2a5cc0ce, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/TestLogRolling-testLogRolling=eed6ab3436672cf1ecb2e52ea172410b-95bacbe698c64d3f96640d0e6d0ca5e4, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/TestLogRolling-testLogRolling=eed6ab3436672cf1ecb2e52ea172410b-c65888852b6547089311f91929c56cd3] into tmpdir=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp, totalSize=120.8 K 2024-11-15T11:49:11,351 DEBUG [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14. 2024-11-15T11:49:11,351 INFO [RS_OPEN_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14. 
2024-11-15T11:49:11,352 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=49e5023b322e0a73b13e6bdd2847ca14, regionState=OPEN, openSeqNum=127, regionLocation=7adf9b3d9d04,39085,1731671334464 2024-11-15T11:49:11,354 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 49e5023b322e0a73b13e6bdd2847ca14, server=7adf9b3d9d04,39085,1731671334464 because future has completed 2024-11-15T11:49:11,354 DEBUG [RS:0;7adf9b3d9d04:39085-longCompactions-0 {}] compactions.Compactor(225): Compacting 0ffb8896242d44308d52fe37cf3f67bd.eed6ab3436672cf1ecb2e52ea172410b, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=83, earliestPutTs=1731671345920 2024-11-15T11:49:11,355 DEBUG [RS:0;7adf9b3d9d04:39085-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=eed6ab3436672cf1ecb2e52ea172410b-e6f26f241d4d45eea80ebf2a2a5cc0ce, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=99, earliestPutTs=1731671350066 2024-11-15T11:49:11,355 DEBUG [RS:0;7adf9b3d9d04:39085-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=eed6ab3436672cf1ecb2e52ea172410b-95bacbe698c64d3f96640d0e6d0ca5e4, keycount=13, bloomtype=ROW, size=18.5 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1731671350098 2024-11-15T11:49:11,356 DEBUG [RS:0;7adf9b3d9d04:39085-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=eed6ab3436672cf1ecb2e52ea172410b-c65888852b6547089311f91929c56cd3, keycount=4, bloomtype=ROW, size=9.1 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1731671350133 2024-11-15T11:49:11,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741851_1027 (size=9847) 2024-11-15T11:49:11,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741851_1027 (size=9847) 2024-11-15T11:49:11,357 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.92 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/hbase/meta/1588230740/.tmp/info/d6cbdd7f6c134434b3240d99ee1f7334 2024-11-15T11:49:11,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741852_1028 (size=70862) 2024-11-15T11:49:11,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741852_1028 (size=70862) 2024-11-15T11:49:11,375 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=11 2024-11-15T11:49:11,375 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 49e5023b322e0a73b13e6bdd2847ca14, server=7adf9b3d9d04,39085,1731671334464 in 257 msec 2024-11-15T11:49:11,380 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-11-15T11:49:11,381 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure 
table=TestLogRolling-testLogRolling, region=49e5023b322e0a73b13e6bdd2847ca14, ASSIGN in 419 msec 2024-11-15T11:49:11,383 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=eed6ab3436672cf1ecb2e52ea172410b, daughterA=54d9b36c4ac742307d7d87ca14264576, daughterB=49e5023b322e0a73b13e6bdd2847ca14 in 835 msec 2024-11-15T11:49:11,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:49:11,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:49:11,390 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/54d9b36c4ac742307d7d87ca14264576/.tmp/info/2dcb1d350c9343aa9aafc425551059e6 as hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/54d9b36c4ac742307d7d87ca14264576/info/2dcb1d350c9343aa9aafc425551059e6 2024-11-15T11:49:11,403 INFO [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 54d9b36c4ac742307d7d87ca14264576/info of 54d9b36c4ac742307d7d87ca14264576 into 2dcb1d350c9343aa9aafc425551059e6(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-15T11:49:11,403 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 54d9b36c4ac742307d7d87ca14264576: 2024-11-15T11:49:11,403 INFO [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731671350546.54d9b36c4ac742307d7d87ca14264576., storeName=54d9b36c4ac742307d7d87ca14264576/info, priority=15, startTime=1731671351295; duration=0sec 2024-11-15T11:49:11,403 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T11:49:11,403 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 54d9b36c4ac742307d7d87ca14264576:info 2024-11-15T11:49:11,412 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/hbase/meta/1588230740/.tmp/ns/7c572f586d5b4f75ab47fc30c400840f is 43, key is default/ns:d/1731671335746/Put/seqid=0 2024-11-15T11:49:11,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741853_1029 (size=5153) 2024-11-15T11:49:11,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741853_1029 (size=5153) 2024-11-15T11:49:11,421 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/hbase/meta/1588230740/.tmp/ns/7c572f586d5b4f75ab47fc30c400840f 2024-11-15T11:49:11,424 INFO [RS:0;7adf9b3d9d04:39085-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 49e5023b322e0a73b13e6bdd2847ca14#info#compaction#69 average throughput is 35.92 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-15T11:49:11,425 DEBUG [RS:0;7adf9b3d9d04:39085-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/d2366b5d056e41e1847295be29326367 is 1080, key is row0062/info:/1731671348055/Put/seqid=0 2024-11-15T11:49:11,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741854_1030 (size=43081) 2024-11-15T11:49:11,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741854_1030 (size=43081) 2024-11-15T11:49:11,447 DEBUG [RS:0;7adf9b3d9d04:39085-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/d2366b5d056e41e1847295be29326367 as hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/d2366b5d056e41e1847295be29326367 2024-11-15T11:49:11,452 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/hbase/meta/1588230740/.tmp/table/9ce0b20059584dbd9feb2c264180632f is 65, key is TestLogRolling-testLogRolling/table:state/1731671336176/Put/seqid=0 2024-11-15T11:49:11,455 INFO [RS:0;7adf9b3d9d04:39085-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 4 (all) file(s) in 49e5023b322e0a73b13e6bdd2847ca14/info of 49e5023b322e0a73b13e6bdd2847ca14 into d2366b5d056e41e1847295be29326367(size=42.1 K), total size for store is 42.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-15T11:49:11,455 DEBUG [RS:0;7adf9b3d9d04:39085-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 49e5023b322e0a73b13e6bdd2847ca14: 2024-11-15T11:49:11,455 INFO [RS:0;7adf9b3d9d04:39085-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14., storeName=49e5023b322e0a73b13e6bdd2847ca14/info, priority=12, startTime=1731671351349; duration=0sec 2024-11-15T11:49:11,455 DEBUG [RS:0;7adf9b3d9d04:39085-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T11:49:11,455 DEBUG [RS:0;7adf9b3d9d04:39085-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49e5023b322e0a73b13e6bdd2847ca14:info 2024-11-15T11:49:11,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741855_1031 (size=5340) 2024-11-15T11:49:11,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741855_1031 (size=5340) 2024-11-15T11:49:11,464 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/hbase/meta/1588230740/.tmp/table/9ce0b20059584dbd9feb2c264180632f 2024-11-15T11:49:11,478 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/hbase/meta/1588230740/.tmp/info/d6cbdd7f6c134434b3240d99ee1f7334 as hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/hbase/meta/1588230740/info/d6cbdd7f6c134434b3240d99ee1f7334 2024-11-15T11:49:11,483 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/hbase/meta/1588230740/info/d6cbdd7f6c134434b3240d99ee1f7334, entries=30, sequenceid=17, filesize=9.6 K 2024-11-15T11:49:11,484 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/hbase/meta/1588230740/.tmp/ns/7c572f586d5b4f75ab47fc30c400840f as hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/hbase/meta/1588230740/ns/7c572f586d5b4f75ab47fc30c400840f 2024-11-15T11:49:11,489 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/hbase/meta/1588230740/ns/7c572f586d5b4f75ab47fc30c400840f, entries=2, sequenceid=17, filesize=5.0 K 2024-11-15T11:49:11,490 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/hbase/meta/1588230740/.tmp/table/9ce0b20059584dbd9feb2c264180632f as hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/hbase/meta/1588230740/table/9ce0b20059584dbd9feb2c264180632f 2024-11-15T11:49:11,496 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/hbase/meta/1588230740/table/9ce0b20059584dbd9feb2c264180632f, entries=2, sequenceid=17, filesize=5.2 K 2024-11-15T11:49:11,498 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.11 KB/5234, heapSize ~8.66 KB/8872, currentSize=705 B/705 for 1588230740 in 195ms, sequenceid=17, compaction requested=false 2024-11-15T11:49:11,498 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-15T11:49:12,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39085 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:37474 deadline: 1731671362142, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731671335807.eed6ab3436672cf1ecb2e52ea172410b. is not online on 7adf9b3d9d04,39085,1731671334464 2024-11-15T11:49:12,169 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731671335807.eed6ab3436672cf1ecb2e52ea172410b., hostname=7adf9b3d9d04,39085,1731671334464, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731671335807.eed6ab3436672cf1ecb2e52ea172410b., hostname=7adf9b3d9d04,39085,1731671334464, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731671335807.eed6ab3436672cf1ecb2e52ea172410b. is not online on 7adf9b3d9d04,39085,1731671334464 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T11:49:12,170 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731671335807.eed6ab3436672cf1ecb2e52ea172410b., hostname=7adf9b3d9d04,39085,1731671334464, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731671335807.eed6ab3436672cf1ecb2e52ea172410b. 
is not online on 7adf9b3d9d04,39085,1731671334464 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T11:49:12,170 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1731671335807.eed6ab3436672cf1ecb2e52ea172410b., hostname=7adf9b3d9d04,39085,1731671334464, seqNum=2 from cache 2024-11-15T11:49:12,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T11:49:12,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:49:16,297 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-15T11:49:16,298 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T11:49:21,016 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-15T11:49:21,016 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-15T11:49:21,398 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:49:21,398 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T11:49:22,250 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0097', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14., hostname=7adf9b3d9d04,39085,1731671334464, seqNum=127] 2024-11-15T11:49:22,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39085 {}] regionserver.HRegion(8855): Flush requested on 49e5023b322e0a73b13e6bdd2847ca14 2024-11-15T11:49:22,263 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 49e5023b322e0a73b13e6bdd2847ca14 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-15T11:49:22,268 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/b08d196ba7b941539b22d70dc48557c3 is 1080, key is row0097/info:/1731671362252/Put/seqid=0 2024-11-15T11:49:22,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741856_1032 (size=12516) 2024-11-15T11:49:22,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741856_1032 (size=12516) 2024-11-15T11:49:22,274 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/b08d196ba7b941539b22d70dc48557c3 2024-11-15T11:49:22,281 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/b08d196ba7b941539b22d70dc48557c3 as hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/b08d196ba7b941539b22d70dc48557c3 2024-11-15T11:49:22,289 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/b08d196ba7b941539b22d70dc48557c3, entries=7, sequenceid=137, filesize=12.2 K 2024-11-15T11:49:22,290 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for 49e5023b322e0a73b13e6bdd2847ca14 in 27ms, sequenceid=137, compaction requested=false 2024-11-15T11:49:22,290 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 49e5023b322e0a73b13e6bdd2847ca14: 2024-11-15T11:49:22,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39085 {}] regionserver.HRegion(8855): Flush requested on 49e5023b322e0a73b13e6bdd2847ca14 2024-11-15T11:49:22,292 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 49e5023b322e0a73b13e6bdd2847ca14 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-15T11:49:22,303 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/3a7f9e4b3bc34385bbb408fd2343a01e is 1080, key is row0104/info:/1731671362264/Put/seqid=0 2024-11-15T11:49:22,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741857_1033 (size=19000) 2024-11-15T11:49:22,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741857_1033 (size=19000) 2024-11-15T11:49:22,309 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=153 (bloomFilter=true), to=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/3a7f9e4b3bc34385bbb408fd2343a01e 2024-11-15T11:49:22,316 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/3a7f9e4b3bc34385bbb408fd2343a01e as hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/3a7f9e4b3bc34385bbb408fd2343a01e 2024-11-15T11:49:22,321 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/3a7f9e4b3bc34385bbb408fd2343a01e, entries=13, sequenceid=153, filesize=18.6 K 2024-11-15T11:49:22,322 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=12.61 KB/12912 for 49e5023b322e0a73b13e6bdd2847ca14 in 30ms, sequenceid=153, compaction requested=true 2024-11-15T11:49:22,323 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 49e5023b322e0a73b13e6bdd2847ca14: 2024-11-15T11:49:22,323 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 49e5023b322e0a73b13e6bdd2847ca14:info, priority=-2147483648, current under compaction store size is 1 2024-11-15T11:49:22,323 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T11:49:22,323 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-15T11:49:22,324 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 74597 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-15T11:49:22,324 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.HStore(1541): 49e5023b322e0a73b13e6bdd2847ca14/info is initiating minor compaction (all files) 2024-11-15T11:49:22,324 INFO [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 49e5023b322e0a73b13e6bdd2847ca14/info in 
TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14. 2024-11-15T11:49:22,324 INFO [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/d2366b5d056e41e1847295be29326367, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/b08d196ba7b941539b22d70dc48557c3, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/3a7f9e4b3bc34385bbb408fd2343a01e] into tmpdir=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp, totalSize=72.8 K 2024-11-15T11:49:22,325 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] compactions.Compactor(225): Compacting d2366b5d056e41e1847295be29326367, keycount=35, bloomtype=ROW, size=42.1 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1731671348055 2024-11-15T11:49:22,325 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] compactions.Compactor(225): Compacting b08d196ba7b941539b22d70dc48557c3, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1731671362252 2024-11-15T11:49:22,325 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3a7f9e4b3bc34385bbb408fd2343a01e, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1731671362264 2024-11-15T11:49:22,337 INFO [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 49e5023b322e0a73b13e6bdd2847ca14#info#compaction#73 average throughput is 28.22 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-15T11:49:22,338 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/8db3a450c8cc44f7ac0e37d263b6b660 is 1080, key is row0062/info:/1731671348055/Put/seqid=0 2024-11-15T11:49:22,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741858_1034 (size=64811) 2024-11-15T11:49:22,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741858_1034 (size=64811) 2024-11-15T11:49:22,349 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/8db3a450c8cc44f7ac0e37d263b6b660 as hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/8db3a450c8cc44f7ac0e37d263b6b660 2024-11-15T11:49:22,356 INFO [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 49e5023b322e0a73b13e6bdd2847ca14/info of 49e5023b322e0a73b13e6bdd2847ca14 into 8db3a450c8cc44f7ac0e37d263b6b660(size=63.3 K), total size for store is 63.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-15T11:49:22,356 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 49e5023b322e0a73b13e6bdd2847ca14: 2024-11-15T11:49:22,356 INFO [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14., storeName=49e5023b322e0a73b13e6bdd2847ca14/info, priority=13, startTime=1731671362323; duration=0sec 2024-11-15T11:49:22,356 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T11:49:22,356 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49e5023b322e0a73b13e6bdd2847ca14:info 2024-11-15T11:49:22,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:49:22,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:49:23,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:49:23,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:49:24,305 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
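Context note on the repeated Close-WAL-Writer-0 WARNs above: RecoverLeaseFSUtils is polling whether two old WAL files under hdfs://localhost:40139 have been closed, and every attempt fails with "java.io.IOException: Filesystem closed" because the DFSClient it reflectively calls into has already been shut down, so the roughly once-per-second retries keep failing for the same reason. As a point of reference only, here is a minimal Java sketch of the underlying HDFS lease-recovery polling pattern; it is not HBase's RecoverLeaseFSUtils, and the path, retry count, and 1-second sleep are illustrative assumptions.

// Minimal sketch of HDFS lease recovery polling (assumptions: path, retries, sleep).
// Not HBase's RecoverLeaseFSUtils; it only uses the public DistributedFileSystem calls
// that appear in the stack traces above (recoverLease / isFileClosed).
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  public static boolean recover(DistributedFileSystem dfs, Path wal)
      throws IOException, InterruptedException {
    // Ask the NameNode to start lease recovery; true means the file is already closed.
    if (dfs.recoverLease(wal)) {
      return true;
    }
    // Poll isFileClosed, the same call that throws "Filesystem closed" in the WARNs
    // above once the underlying DFSClient has been shut down.
    for (int i = 0; i < 10; i++) {
      if (dfs.isFileClosed(wal)) {
        return true;
      }
      Thread.sleep(1000L); // roughly the retry cadence visible in the log timestamps
    }
    return false;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical WAL path; the real paths are the ones printed in the WARNs above.
    Path wal = new Path("hdfs://localhost:40139/example/old.wal");
    try (FileSystem fs = FileSystem.get(wal.toUri(), conf)) {
      if (fs instanceof DistributedFileSystem) {
        System.out.println("closed=" + recover((DistributedFileSystem) fs, wal));
      }
    }
  }
}

The stack traces show exactly this shape: a reflective call from RecoverLeaseFSUtils.isFileClosed into DistributedFileSystem.isFileClosed, with DFSClient.checkOpen rejecting the call because the client is closed.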
2024-11-15T11:49:24,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39085 {}] regionserver.HRegion(8855): Flush requested on 49e5023b322e0a73b13e6bdd2847ca14 2024-11-15T11:49:24,322 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 49e5023b322e0a73b13e6bdd2847ca14 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-15T11:49:24,327 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/70efe76ba0804333b947c7512adf078a is 1080, key is row0117/info:/1731671362293/Put/seqid=0 2024-11-15T11:49:24,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741859_1035 (size=19000) 2024-11-15T11:49:24,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741859_1035 (size=19000) 2024-11-15T11:49:24,337 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/70efe76ba0804333b947c7512adf078a 2024-11-15T11:49:24,342 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/70efe76ba0804333b947c7512adf078a as hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/70efe76ba0804333b947c7512adf078a 2024-11-15T11:49:24,348 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/70efe76ba0804333b947c7512adf078a, entries=13, sequenceid=170, filesize=18.6 K 2024-11-15T11:49:24,349 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=10.51 KB/10760 for 49e5023b322e0a73b13e6bdd2847ca14 in 26ms, sequenceid=170, compaction requested=false 2024-11-15T11:49:24,349 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 49e5023b322e0a73b13e6bdd2847ca14: 2024-11-15T11:49:24,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39085 {}] regionserver.HRegion(8855): Flush requested on 49e5023b322e0a73b13e6bdd2847ca14 2024-11-15T11:49:24,350 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 49e5023b322e0a73b13e6bdd2847ca14 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-15T11:49:24,354 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/0898d1f6e61a45a1bae4b9c38e524e0d is 1080, key is row0130/info:/1731671364324/Put/seqid=0 2024-11-15T11:49:24,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to 
blk_1073741860_1036 (size=16828) 2024-11-15T11:49:24,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741860_1036 (size=16828) 2024-11-15T11:49:24,365 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=184 (bloomFilter=true), to=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/0898d1f6e61a45a1bae4b9c38e524e0d 2024-11-15T11:49:24,371 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/0898d1f6e61a45a1bae4b9c38e524e0d as hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/0898d1f6e61a45a1bae4b9c38e524e0d 2024-11-15T11:49:24,377 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/0898d1f6e61a45a1bae4b9c38e524e0d, entries=11, sequenceid=184, filesize=16.4 K 2024-11-15T11:49:24,378 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=13.66 KB/13988 for 49e5023b322e0a73b13e6bdd2847ca14 in 28ms, sequenceid=184, compaction requested=true 2024-11-15T11:49:24,378 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 49e5023b322e0a73b13e6bdd2847ca14: 2024-11-15T11:49:24,378 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 49e5023b322e0a73b13e6bdd2847ca14:info, priority=-2147483648, current under compaction store size is 1 2024-11-15T11:49:24,378 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T11:49:24,378 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-15T11:49:24,379 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 100639 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-15T11:49:24,379 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.HStore(1541): 49e5023b322e0a73b13e6bdd2847ca14/info is initiating minor compaction (all files) 2024-11-15T11:49:24,379 INFO [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 49e5023b322e0a73b13e6bdd2847ca14/info in TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14. 
2024-11-15T11:49:24,380 INFO [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/8db3a450c8cc44f7ac0e37d263b6b660, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/70efe76ba0804333b947c7512adf078a, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/0898d1f6e61a45a1bae4b9c38e524e0d] into tmpdir=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp, totalSize=98.3 K 2024-11-15T11:49:24,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39085 {}] regionserver.HRegion(8855): Flush requested on 49e5023b322e0a73b13e6bdd2847ca14 2024-11-15T11:49:24,380 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 49e5023b322e0a73b13e6bdd2847ca14 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-15T11:49:24,380 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8db3a450c8cc44f7ac0e37d263b6b660, keycount=55, bloomtype=ROW, size=63.3 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1731671348055 2024-11-15T11:49:24,381 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] compactions.Compactor(225): Compacting 70efe76ba0804333b947c7512adf078a, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1731671362293 2024-11-15T11:49:24,381 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0898d1f6e61a45a1bae4b9c38e524e0d, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=184, earliestPutTs=1731671364324 2024-11-15T11:49:24,385 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/1f59309ec7644135aedd266539011165 is 1080, key is row0141/info:/1731671364351/Put/seqid=0 2024-11-15T11:49:24,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741861_1037 (size=21156) 2024-11-15T11:49:24,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741861_1037 (size=21156) 2024-11-15T11:49:24,393 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/1f59309ec7644135aedd266539011165 2024-11-15T11:49:24,396 INFO [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 49e5023b322e0a73b13e6bdd2847ca14#info#compaction#77 average throughput is 40.53 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-15T11:49:24,396 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/1bcebc8c622a4faabcf62abaa2e36857 is 1080, key is row0062/info:/1731671348055/Put/seqid=0 2024-11-15T11:49:24,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:49:24,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:49:24,401 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/1f59309ec7644135aedd266539011165 as hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/1f59309ec7644135aedd266539011165 2024-11-15T11:49:24,407 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/1f59309ec7644135aedd266539011165, entries=15, sequenceid=202, filesize=20.7 K 2024-11-15T11:49:24,408 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=5.25 KB/5380 for 49e5023b322e0a73b13e6bdd2847ca14 in 28ms, sequenceid=202, compaction requested=false 2024-11-15T11:49:24,408 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 49e5023b322e0a73b13e6bdd2847ca14: 2024-11-15T11:49:24,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741862_1038 (size=90862) 2024-11-15T11:49:24,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741862_1038 (size=90862) 2024-11-15T11:49:24,419 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/1bcebc8c622a4faabcf62abaa2e36857 as hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/1bcebc8c622a4faabcf62abaa2e36857 2024-11-15T11:49:24,425 INFO [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 49e5023b322e0a73b13e6bdd2847ca14/info of 49e5023b322e0a73b13e6bdd2847ca14 into 1bcebc8c622a4faabcf62abaa2e36857(size=88.7 K), total size for store is 109.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-15T11:49:24,425 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 49e5023b322e0a73b13e6bdd2847ca14: 2024-11-15T11:49:24,425 INFO [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14., storeName=49e5023b322e0a73b13e6bdd2847ca14/info, priority=13, startTime=1731671364378; duration=0sec 2024-11-15T11:49:24,425 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T11:49:24,425 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49e5023b322e0a73b13e6bdd2847ca14:info 2024-11-15T11:49:25,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:49:25,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:49:26,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39085 {}] regionserver.HRegion(8855): Flush requested on 49e5023b322e0a73b13e6bdd2847ca14 2024-11-15T11:49:26,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:49:26,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:49:26,402 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 49e5023b322e0a73b13e6bdd2847ca14 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-15T11:49:26,425 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/d2aa343d54fb4999b26a8692bce3927a is 1080, key is row0156/info:/1731671364381/Put/seqid=0 2024-11-15T11:49:26,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741863_1039 (size=12516) 2024-11-15T11:49:26,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741863_1039 (size=12516) 2024-11-15T11:49:26,432 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/d2aa343d54fb4999b26a8692bce3927a 2024-11-15T11:49:26,439 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/d2aa343d54fb4999b26a8692bce3927a as hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/d2aa343d54fb4999b26a8692bce3927a 2024-11-15T11:49:26,446 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/d2aa343d54fb4999b26a8692bce3927a, entries=7, sequenceid=213, filesize=12.2 K 2024-11-15T11:49:26,447 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for 49e5023b322e0a73b13e6bdd2847ca14 in 44ms, sequenceid=213, compaction requested=true 2024-11-15T11:49:26,447 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 49e5023b322e0a73b13e6bdd2847ca14: 2024-11-15T11:49:26,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39085 {}] regionserver.HRegion(8855): Flush requested on 49e5023b322e0a73b13e6bdd2847ca14 2024-11-15T11:49:26,447 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 49e5023b322e0a73b13e6bdd2847ca14:info, priority=-2147483648, current under compaction store size is 1 2024-11-15T11:49:26,447 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T11:49:26,447 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-15T11:49:26,448 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 49e5023b322e0a73b13e6bdd2847ca14 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-15T11:49:26,448 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 124534 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-15T11:49:26,448 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.HStore(1541): 49e5023b322e0a73b13e6bdd2847ca14/info is initiating minor compaction (all files) 2024-11-15T11:49:26,448 INFO [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 49e5023b322e0a73b13e6bdd2847ca14/info in TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14. 2024-11-15T11:49:26,448 INFO [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/1bcebc8c622a4faabcf62abaa2e36857, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/1f59309ec7644135aedd266539011165, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/d2aa343d54fb4999b26a8692bce3927a] into tmpdir=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp, totalSize=121.6 K 2024-11-15T11:49:26,449 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1bcebc8c622a4faabcf62abaa2e36857, keycount=79, bloomtype=ROW, size=88.7 K, encoding=NONE, compression=NONE, seqNum=184, earliestPutTs=1731671348055 2024-11-15T11:49:26,449 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1f59309ec7644135aedd266539011165, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1731671364351 2024-11-15T11:49:26,450 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] compactions.Compactor(225): Compacting d2aa343d54fb4999b26a8692bce3927a, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1731671364381 2024-11-15T11:49:26,464 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/cca7c432c06341b4a62079dcb053ea75 is 1080, key is row0163/info:/1731671366422/Put/seqid=0 2024-11-15T11:49:26,479 INFO [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 49e5023b322e0a73b13e6bdd2847ca14#info#compaction#80 average throughput is 25.91 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-15T11:49:26,480 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/c7bb88fc96504c8dafe8007173775bb0 is 1080, key is row0062/info:/1731671348055/Put/seqid=0 2024-11-15T11:49:26,491 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39085 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=49e5023b322e0a73b13e6bdd2847ca14, server=7adf9b3d9d04,39085,1731671334464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-15T11:49:26,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39085 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:37474 deadline: 1731671376490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=49e5023b322e0a73b13e6bdd2847ca14, server=7adf9b3d9d04,39085,1731671334464 2024-11-15T11:49:26,492 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14., hostname=7adf9b3d9d04,39085,1731671334464, seqNum=127 , the old value is region=TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14., hostname=7adf9b3d9d04,39085,1731671334464, seqNum=127, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=49e5023b322e0a73b13e6bdd2847ca14, server=7adf9b3d9d04,39085,1731671334464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T11:49:26,492 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14., hostname=7adf9b3d9d04,39085,1731671334464, seqNum=127 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=49e5023b322e0a73b13e6bdd2847ca14, server=7adf9b3d9d04,39085,1731671334464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T11:49:26,492 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14., hostname=7adf9b3d9d04,39085,1731671334464, seqNum=127 because the exception is null or not the one we care about 2024-11-15T11:49:26,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741864_1040 (size=16828) 2024-11-15T11:49:26,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741864_1040 (size=16828) 2024-11-15T11:49:26,497 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=227 (bloomFilter=true), to=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/cca7c432c06341b4a62079dcb053ea75 2024-11-15T11:49:26,503 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/cca7c432c06341b4a62079dcb053ea75 as hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/cca7c432c06341b4a62079dcb053ea75 2024-11-15T11:49:26,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741865_1041 (size=114684) 2024-11-15T11:49:26,506 INFO [Block report processor {}] 
2024-11-15T11:49:26,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741865_1041 (size=114684)
2024-11-15T11:49:26,512 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/cca7c432c06341b4a62079dcb053ea75, entries=11, sequenceid=227, filesize=16.4 K
2024-11-15T11:49:26,513 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=18.91 KB/19368 for 49e5023b322e0a73b13e6bdd2847ca14 in 66ms, sequenceid=227, compaction requested=false
2024-11-15T11:49:26,513 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 49e5023b322e0a73b13e6bdd2847ca14:
2024-11-15T11:49:26,513 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/c7bb88fc96504c8dafe8007173775bb0 as hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/c7bb88fc96504c8dafe8007173775bb0
2024-11-15T11:49:26,533 INFO [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 49e5023b322e0a73b13e6bdd2847ca14/info of 49e5023b322e0a73b13e6bdd2847ca14 into c7bb88fc96504c8dafe8007173775bb0(size=112.0 K), total size for store is 128.4 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-15T11:49:26,533 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 49e5023b322e0a73b13e6bdd2847ca14:
2024-11-15T11:49:26,533 INFO [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14., storeName=49e5023b322e0a73b13e6bdd2847ca14/info, priority=13, startTime=1731671366447; duration=0sec
2024-11-15T11:49:26,533 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-15T11:49:26,533 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49e5023b322e0a73b13e6bdd2847ca14:info
2024-11-15T11:49:27,402 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta
java.lang.reflect.InvocationTargetException: null
  at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
  at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
  at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
  at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
  at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
  at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
  at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
  at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
  at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
  ... 11 more
[The same WARN from util.RecoverLeaseFSUtils(258), with an identical stack trace, is logged roughly once per second for both WAL files under hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/ (7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta and 7adf9b3d9d04%2C39827%2C1731671196494.1731671196826) at 11:49:27,402, 11:49:28,403, 11:49:29,404, 11:49:30,404, 11:49:31,405, 11:49:32,405, 11:49:33,406, 11:49:34,407 and 11:49:35,407.]
2024-11-15T11:49:35,761 INFO [master/7adf9b3d9d04:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-11-15T11:49:35,761 INFO [master/7adf9b3d9d04:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
[The WARN pair for both WAL files, again with the identical stack trace, repeats at 11:49:36,408.]
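The warnings above are WAL close-time lease recovery that can never succeed: each probe fails with "Filesystem closed" because the DFSClient behind that FileSystem instance has already been shut down, so RecoverLeaseFSUtils just retries about once per second against both WAL files. The sketch below illustrates the same recover-then-poll pattern using the public HDFS API; it is not the HBase utility's actual code, and the method name, pause, and attempt limit are assumptions:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class WalLeaseRecoverySketch {
      /** Ask the NameNode to recover the lease, then poll until the file is closed. */
      static void recoverLease(Configuration conf, Path wal, long pauseMs, int maxAttempts)
          throws IOException, InterruptedException {
        FileSystem fs = wal.getFileSystem(conf);
        if (!(fs instanceof DistributedFileSystem)) {
          return; // nothing to recover on a non-HDFS filesystem
        }
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        for (int attempt = 0; attempt < maxAttempts; attempt++) {
          // Both calls fail with "Filesystem closed" if the underlying DFSClient was
          // already shut down, which is exactly what the log above shows.
          if (dfs.recoverLease(wal) || dfs.isFileClosed(wal)) {
            return; // lease released and file closed
          }
          Thread.sleep(pauseMs); // the warnings above recur about once per second
        }
        throw new IOException("Could not recover lease on " + wal);
      }
    }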
2024-11-15T11:49:36,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39085 {}] regionserver.HRegion(8855): Flush requested on 49e5023b322e0a73b13e6bdd2847ca14
2024-11-15T11:49:36,553 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 49e5023b322e0a73b13e6bdd2847ca14 1/1 column families, dataSize=19.96 KB heapSize=21.63 KB
2024-11-15T11:49:36,558 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/8507e33c43c84dfdba17d2d06e1d2ecf is 1080, key is row0174/info:/1731671366448/Put/seqid=0
2024-11-15T11:49:36,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741866_1042 (size=25472)
2024-11-15T11:49:36,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741866_1042 (size=25472)
2024-11-15T11:49:36,565 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=19.96 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/8507e33c43c84dfdba17d2d06e1d2ecf
2024-11-15T11:49:36,571 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/8507e33c43c84dfdba17d2d06e1d2ecf as hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/8507e33c43c84dfdba17d2d06e1d2ecf
2024-11-15T11:49:36,575 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/8507e33c43c84dfdba17d2d06e1d2ecf, entries=19, sequenceid=250, filesize=24.9 K
2024-11-15T11:49:36,576 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~19.96 KB/20444, heapSize ~21.61 KB/22128, currentSize=0 B/0 for 49e5023b322e0a73b13e6bdd2847ca14 in 23ms, sequenceid=250, compaction requested=true
2024-11-15T11:49:36,576 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 49e5023b322e0a73b13e6bdd2847ca14:
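These flushes are tiny (11-20 KB) because the test runs with a very small memstore flush threshold; that is also why the 32.0 K blocking limit in the earlier RegionTooBusyException is hit so quickly, since the blocking limit is the flush size times hbase.hregion.memstore.block.multiplier (4 by default). Below is a hedged sketch of how such a threshold could be configured; the exact values used by TestLogRolling do not appear in this log, and the 8 KB figure is only inferred from 32 K / 4:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SmallMemstoreFlushConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Flush a region's memstore once it reaches ~8 KB (the production default is 128 MB).
        conf.setLong("hbase.hregion.memstore.flush.size", 8 * 1024L);
        // Writes get RegionTooBusyException once the memstore exceeds
        // flush.size * multiplier, i.e. 32 KB with these assumed values.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("blocking limit (bytes): "
            + conf.getLong("hbase.hregion.memstore.flush.size", 0)
              * conf.getInt("hbase.hregion.memstore.block.multiplier", 4));
      }
    }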
2024-11-15T11:49:36,576 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 49e5023b322e0a73b13e6bdd2847ca14:info, priority=-2147483648, current under compaction store size is 1
2024-11-15T11:49:36,576 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-15T11:49:36,576 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-15T11:49:36,577 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 156984 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-15T11:49:36,578 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.HStore(1541): 49e5023b322e0a73b13e6bdd2847ca14/info is initiating minor compaction (all files)
2024-11-15T11:49:36,578 INFO [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 49e5023b322e0a73b13e6bdd2847ca14/info in TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14.
2024-11-15T11:49:36,578 INFO [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/c7bb88fc96504c8dafe8007173775bb0, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/cca7c432c06341b4a62079dcb053ea75, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/8507e33c43c84dfdba17d2d06e1d2ecf] into tmpdir=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp, totalSize=153.3 K
2024-11-15T11:49:36,578 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] compactions.Compactor(225): Compacting c7bb88fc96504c8dafe8007173775bb0, keycount=101, bloomtype=ROW, size=112.0 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1731671348055
2024-11-15T11:49:36,579 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] compactions.Compactor(225): Compacting cca7c432c06341b4a62079dcb053ea75, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=227, earliestPutTs=1731671366422
2024-11-15T11:49:36,579 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8507e33c43c84dfdba17d2d06e1d2ecf, keycount=19, bloomtype=ROW, size=24.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1731671366448
2024-11-15T11:49:36,592 INFO [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 49e5023b322e0a73b13e6bdd2847ca14#info#compaction#82 average throughput is 44.81 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
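This is the standard minor-compaction path: ExploringCompactionPolicy selects the three eligible store files (the 112.0 K output of the previous compaction plus the two newer flush files) and the rewrite is throttled by PressureAwareThroughputController under a 50 MB/s limit. The knobs that drive file selection are ordinary regionserver configuration; the sketch below shows the usual defaults purely for illustration (this test may well override them):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionSelectionTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Minimum and maximum number of store files considered for one minor compaction.
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);
        // A file qualifies if its size is at most ratio times the combined size of the
        // other candidates in the selection.
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        // Updates to the region are blocked once a store has this many files
        // ("16 blocking" in the SortedCompactionPolicy line above).
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        System.out.println("min files per compaction: "
            + conf.getInt("hbase.hstore.compaction.min", -1));
      }
    }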
2024-11-15T11:49:36,593 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/6b91a27717e545ed952df49b5da4ddb5 is 1080, key is row0062/info:/1731671348055/Put/seqid=0
2024-11-15T11:49:36,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741867_1043 (size=147331)
2024-11-15T11:49:36,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741867_1043 (size=147331)
2024-11-15T11:49:36,602 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/6b91a27717e545ed952df49b5da4ddb5 as hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/6b91a27717e545ed952df49b5da4ddb5
2024-11-15T11:49:36,608 INFO [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 49e5023b322e0a73b13e6bdd2847ca14/info of 49e5023b322e0a73b13e6bdd2847ca14 into 6b91a27717e545ed952df49b5da4ddb5(size=143.9 K), total size for store is 143.9 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-15T11:49:36,608 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 49e5023b322e0a73b13e6bdd2847ca14:
2024-11-15T11:49:36,608 INFO [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14., storeName=49e5023b322e0a73b13e6bdd2847ca14/info, priority=13, startTime=1731671376576; duration=0sec
2024-11-15T11:49:36,608 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-15T11:49:36,608 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49e5023b322e0a73b13e6bdd2847ca14:info
[The same util.RecoverLeaseFSUtils(258) WARN, with the identical InvocationTargetException / "Filesystem closed" stack trace shown earlier, is logged again for both WAL files at 11:49:37,409 and 11:49:38,410.]
2024-11-15T11:49:38,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39085 {}] regionserver.HRegion(8855): Flush requested on 49e5023b322e0a73b13e6bdd2847ca14
2024-11-15T11:49:38,566 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 49e5023b322e0a73b13e6bdd2847ca14 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-15T11:49:38,570 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/081b0c46f2264263896f76e0664825c4 is 1080, key is row0193/info:/1731671378554/Put/seqid=0
2024-11-15T11:49:38,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741868_1044 (size=12521)
2024-11-15T11:49:38,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741868_1044 (size=12521)
2024-11-15T11:49:38,578 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=261 (bloomFilter=true), to=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/081b0c46f2264263896f76e0664825c4
2024-11-15T11:49:38,584 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/081b0c46f2264263896f76e0664825c4 as
hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/081b0c46f2264263896f76e0664825c4 2024-11-15T11:49:38,590 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/081b0c46f2264263896f76e0664825c4, entries=7, sequenceid=261, filesize=12.2 K 2024-11-15T11:49:38,591 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=12.61 KB/12912 for 49e5023b322e0a73b13e6bdd2847ca14 in 25ms, sequenceid=261, compaction requested=false 2024-11-15T11:49:38,591 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 49e5023b322e0a73b13e6bdd2847ca14: 2024-11-15T11:49:38,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39085 {}] regionserver.HRegion(8855): Flush requested on 49e5023b322e0a73b13e6bdd2847ca14 2024-11-15T11:49:38,593 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 49e5023b322e0a73b13e6bdd2847ca14 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-15T11:49:38,597 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/6be5627e802c4f8a81955d2de3775efe is 1080, key is row0200/info:/1731671378567/Put/seqid=0 2024-11-15T11:49:38,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741869_1045 (size=19013) 2024-11-15T11:49:38,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741869_1045 (size=19013) 2024-11-15T11:49:38,603 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=277 (bloomFilter=true), to=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/6be5627e802c4f8a81955d2de3775efe 2024-11-15T11:49:38,609 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/6be5627e802c4f8a81955d2de3775efe as hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/6be5627e802c4f8a81955d2de3775efe 2024-11-15T11:49:38,614 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/6be5627e802c4f8a81955d2de3775efe, entries=13, sequenceid=277, filesize=18.6 K 2024-11-15T11:49:38,615 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=10.51 KB/10760 for 49e5023b322e0a73b13e6bdd2847ca14 in 22ms, sequenceid=277, compaction requested=true 2024-11-15T11:49:38,615 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for 49e5023b322e0a73b13e6bdd2847ca14: 2024-11-15T11:49:38,615 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 49e5023b322e0a73b13e6bdd2847ca14:info, priority=-2147483648, current under compaction store size is 1 2024-11-15T11:49:38,615 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T11:49:38,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39085 {}] regionserver.HRegion(8855): Flush requested on 49e5023b322e0a73b13e6bdd2847ca14 2024-11-15T11:49:38,615 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-15T11:49:38,615 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 49e5023b322e0a73b13e6bdd2847ca14 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-15T11:49:38,616 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 178865 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-15T11:49:38,617 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.HStore(1541): 49e5023b322e0a73b13e6bdd2847ca14/info is initiating minor compaction (all files) 2024-11-15T11:49:38,617 INFO [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 49e5023b322e0a73b13e6bdd2847ca14/info in TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14. 2024-11-15T11:49:38,617 INFO [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/6b91a27717e545ed952df49b5da4ddb5, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/081b0c46f2264263896f76e0664825c4, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/6be5627e802c4f8a81955d2de3775efe] into tmpdir=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp, totalSize=174.7 K 2024-11-15T11:49:38,617 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6b91a27717e545ed952df49b5da4ddb5, keycount=131, bloomtype=ROW, size=143.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1731671348055 2024-11-15T11:49:38,617 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] compactions.Compactor(225): Compacting 081b0c46f2264263896f76e0664825c4, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=261, earliestPutTs=1731671378554 2024-11-15T11:49:38,618 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6be5627e802c4f8a81955d2de3775efe, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1731671378567 2024-11-15T11:49:38,620 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/99857ba7f7a3457ab44e4e9fb63e917f is 1080, key is row0213/info:/1731671378594/Put/seqid=0 2024-11-15T11:49:38,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741870_1046 (size=16839) 2024-11-15T11:49:38,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741870_1046 (size=16839) 2024-11-15T11:49:38,635 INFO [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 49e5023b322e0a73b13e6bdd2847ca14#info#compaction#86 average throughput is 51.65 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-15T11:49:38,636 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/197b254bcc94485aa783b6e700fb7178 is 1080, key is row0062/info:/1731671348055/Put/seqid=0 2024-11-15T11:49:38,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741871_1047 (size=169015) 2024-11-15T11:49:38,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741871_1047 (size=169015) 2024-11-15T11:49:38,649 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/197b254bcc94485aa783b6e700fb7178 as hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/197b254bcc94485aa783b6e700fb7178 2024-11-15T11:49:38,656 INFO [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 49e5023b322e0a73b13e6bdd2847ca14/info of 49e5023b322e0a73b13e6bdd2847ca14 into 197b254bcc94485aa783b6e700fb7178(size=165.1 K), total size for store is 165.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-15T11:49:38,656 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 49e5023b322e0a73b13e6bdd2847ca14: 2024-11-15T11:49:38,656 INFO [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14., storeName=49e5023b322e0a73b13e6bdd2847ca14/info, priority=13, startTime=1731671378615; duration=0sec 2024-11-15T11:49:38,656 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T11:49:38,656 DEBUG [RS:0;7adf9b3d9d04:39085-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49e5023b322e0a73b13e6bdd2847ca14:info 2024-11-15T11:49:39,032 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/99857ba7f7a3457ab44e4e9fb63e917f 2024-11-15T11:49:39,038 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/99857ba7f7a3457ab44e4e9fb63e917f as hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/99857ba7f7a3457ab44e4e9fb63e917f 2024-11-15T11:49:39,043 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/99857ba7f7a3457ab44e4e9fb63e917f, entries=11, sequenceid=291, filesize=16.4 K 2024-11-15T11:49:39,044 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=1.05 KB/1076 for 49e5023b322e0a73b13e6bdd2847ca14 in 429ms, sequenceid=291, compaction requested=false 2024-11-15T11:49:39,044 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 49e5023b322e0a73b13e6bdd2847ca14: 2024-11-15T11:49:39,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:49:39,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:49:40,411 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:49:40,411 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:49:40,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39085 {}] regionserver.HRegion(8855): Flush requested on 49e5023b322e0a73b13e6bdd2847ca14 2024-11-15T11:49:40,630 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 49e5023b322e0a73b13e6bdd2847ca14 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-15T11:49:40,635 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/76a042e80cd44933833e338839f7eddb is 1080, key is row0224/info:/1731671378617/Put/seqid=0 2024-11-15T11:49:40,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741872_1048 (size=12523) 2024-11-15T11:49:40,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741872_1048 (size=12523) 2024-11-15T11:49:40,644 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=302 (bloomFilter=true), to=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/76a042e80cd44933833e338839f7eddb 2024-11-15T11:49:40,650 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/76a042e80cd44933833e338839f7eddb as 
hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/76a042e80cd44933833e338839f7eddb 2024-11-15T11:49:40,655 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/76a042e80cd44933833e338839f7eddb, entries=7, sequenceid=302, filesize=12.2 K 2024-11-15T11:49:40,656 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for 49e5023b322e0a73b13e6bdd2847ca14 in 26ms, sequenceid=302, compaction requested=true 2024-11-15T11:49:40,656 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 49e5023b322e0a73b13e6bdd2847ca14: 2024-11-15T11:49:40,656 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 49e5023b322e0a73b13e6bdd2847ca14:info, priority=-2147483648, current under compaction store size is 1 2024-11-15T11:49:40,656 DEBUG [RS:0;7adf9b3d9d04:39085-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-15T11:49:40,656 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T11:49:40,657 DEBUG [RS:0;7adf9b3d9d04:39085-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 198377 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-15T11:49:40,657 DEBUG [RS:0;7adf9b3d9d04:39085-longCompactions-0 {}] regionserver.HStore(1541): 49e5023b322e0a73b13e6bdd2847ca14/info is initiating minor compaction (all files) 2024-11-15T11:49:40,658 INFO [RS:0;7adf9b3d9d04:39085-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 49e5023b322e0a73b13e6bdd2847ca14/info in TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14. 
2024-11-15T11:49:40,658 INFO [RS:0;7adf9b3d9d04:39085-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/197b254bcc94485aa783b6e700fb7178, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/99857ba7f7a3457ab44e4e9fb63e917f, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/76a042e80cd44933833e338839f7eddb] into tmpdir=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp, totalSize=193.7 K 2024-11-15T11:49:40,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39085 {}] regionserver.HRegion(8855): Flush requested on 49e5023b322e0a73b13e6bdd2847ca14 2024-11-15T11:49:40,658 DEBUG [RS:0;7adf9b3d9d04:39085-longCompactions-0 {}] compactions.Compactor(225): Compacting 197b254bcc94485aa783b6e700fb7178, keycount=151, bloomtype=ROW, size=165.1 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1731671348055 2024-11-15T11:49:40,658 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 49e5023b322e0a73b13e6bdd2847ca14 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-15T11:49:40,658 DEBUG [RS:0;7adf9b3d9d04:39085-longCompactions-0 {}] compactions.Compactor(225): Compacting 99857ba7f7a3457ab44e4e9fb63e917f, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1731671378594 2024-11-15T11:49:40,659 DEBUG [RS:0;7adf9b3d9d04:39085-longCompactions-0 {}] compactions.Compactor(225): Compacting 76a042e80cd44933833e338839f7eddb, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=302, earliestPutTs=1731671378617 2024-11-15T11:49:40,662 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/35265554fb5f429f8fe7bca2fe648e4a is 1080, key is row0231/info:/1731671380631/Put/seqid=0 2024-11-15T11:49:40,678 INFO [RS:0;7adf9b3d9d04:39085-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 49e5023b322e0a73b13e6bdd2847ca14#info#compaction#89 average throughput is 57.81 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-15T11:49:40,679 DEBUG [RS:0;7adf9b3d9d04:39085-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/83c11a6ebbb944a1bfb3b224a208a5b3 is 1080, key is row0062/info:/1731671348055/Put/seqid=0 2024-11-15T11:49:40,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741873_1049 (size=19013) 2024-11-15T11:49:40,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741873_1049 (size=19013) 2024-11-15T11:49:40,682 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/35265554fb5f429f8fe7bca2fe648e4a 2024-11-15T11:49:40,683 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20340 2024-11-15T11:49:40,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741874_1050 (size=188547) 2024-11-15T11:49:40,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741874_1050 (size=188547) 2024-11-15T11:49:40,689 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/35265554fb5f429f8fe7bca2fe648e4a as hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/35265554fb5f429f8fe7bca2fe648e4a 2024-11-15T11:49:40,690 DEBUG [RS:0;7adf9b3d9d04:39085-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/83c11a6ebbb944a1bfb3b224a208a5b3 as hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/83c11a6ebbb944a1bfb3b224a208a5b3 2024-11-15T11:49:40,695 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/35265554fb5f429f8fe7bca2fe648e4a, entries=13, sequenceid=318, filesize=18.6 K 2024-11-15T11:49:40,696 INFO [RS:0;7adf9b3d9d04:39085-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 49e5023b322e0a73b13e6bdd2847ca14/info of 49e5023b322e0a73b13e6bdd2847ca14 into 83c11a6ebbb944a1bfb3b224a208a5b3(size=184.1 K), total size for store is 202.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-15T11:49:40,696 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=13.66 KB/13988 for 49e5023b322e0a73b13e6bdd2847ca14 in 38ms, sequenceid=318, compaction requested=false 2024-11-15T11:49:40,696 DEBUG [RS:0;7adf9b3d9d04:39085-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 49e5023b322e0a73b13e6bdd2847ca14: 2024-11-15T11:49:40,696 INFO [RS:0;7adf9b3d9d04:39085-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14., storeName=49e5023b322e0a73b13e6bdd2847ca14/info, priority=13, startTime=1731671380656; duration=0sec 2024-11-15T11:49:40,696 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 49e5023b322e0a73b13e6bdd2847ca14: 2024-11-15T11:49:40,696 DEBUG [RS:0;7adf9b3d9d04:39085-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T11:49:40,696 DEBUG [RS:0;7adf9b3d9d04:39085-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49e5023b322e0a73b13e6bdd2847ca14:info 2024-11-15T11:49:41,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:49:41,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:49:42,413 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:49:42,413 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:49:42,689 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-15T11:49:42,690 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7adf9b3d9d04%2C39085%2C1731671334464.1731671382690 2024-11-15T11:49:42,699 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:49:42,699 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:49:42,699 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:49:42,699 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:49:42,699 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:49:42,700 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/WALs/7adf9b3d9d04,39085,1731671334464/7adf9b3d9d04%2C39085%2C1731671334464.1731671334986 with entries=312, filesize=308.51 KB; new WAL /user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/WALs/7adf9b3d9d04,39085,1731671334464/7adf9b3d9d04%2C39085%2C1731671334464.1731671382690 2024-11-15T11:49:42,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741832_1008 (size=315921) 2024-11-15T11:49:42,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741832_1008 (size=315921) 2024-11-15T11:49:42,703 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38175:38175),(127.0.0.1/127.0.0.1:45607:45607)] 2024-11-15T11:49:42,706 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 54d9b36c4ac742307d7d87ca14264576: 2024-11-15T11:49:42,707 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 49e5023b322e0a73b13e6bdd2847ca14 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-15T11:49:42,710 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/5937ddf5ff184b7899ea2fce537b1a5a is 1080, key is row0244/info:/1731671380660/Put/seqid=0 2024-11-15T11:49:42,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741876_1052 (size=19013) 2024-11-15T11:49:42,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741876_1052 (size=19013) 2024-11-15T11:49:42,715 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=335 (bloomFilter=true), 
to=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/5937ddf5ff184b7899ea2fce537b1a5a 2024-11-15T11:49:42,719 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/.tmp/info/5937ddf5ff184b7899ea2fce537b1a5a as hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/5937ddf5ff184b7899ea2fce537b1a5a 2024-11-15T11:49:42,724 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/5937ddf5ff184b7899ea2fce537b1a5a, entries=13, sequenceid=335, filesize=18.6 K 2024-11-15T11:49:42,725 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=0 B/0 for 49e5023b322e0a73b13e6bdd2847ca14 in 19ms, sequenceid=335, compaction requested=true 2024-11-15T11:49:42,725 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 49e5023b322e0a73b13e6bdd2847ca14: 2024-11-15T11:49:42,726 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=705 B heapSize=2.05 KB 2024-11-15T11:49:42,729 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/hbase/meta/1588230740/.tmp/info/5e135ea8a6c541cb9f5238e2bd1baea9 is 193, key is TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14./info:regioninfo/1731671351352/Put/seqid=0 2024-11-15T11:49:42,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741877_1053 (size=6223) 2024-11-15T11:49:42,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741877_1053 (size=6223) 2024-11-15T11:49:42,734 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=705 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/hbase/meta/1588230740/.tmp/info/5e135ea8a6c541cb9f5238e2bd1baea9 2024-11-15T11:49:42,738 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/hbase/meta/1588230740/.tmp/info/5e135ea8a6c541cb9f5238e2bd1baea9 as hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/hbase/meta/1588230740/info/5e135ea8a6c541cb9f5238e2bd1baea9 2024-11-15T11:49:42,742 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/hbase/meta/1588230740/info/5e135ea8a6c541cb9f5238e2bd1baea9, entries=5, sequenceid=21, filesize=6.1 K 2024-11-15T11:49:42,743 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~705 B/705, heapSize ~1.29 KB/1320, currentSize=0 B/0 for 1588230740 in 17ms, 
sequenceid=21, compaction requested=false 2024-11-15T11:49:42,744 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-15T11:49:42,744 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7adf9b3d9d04%2C39085%2C1731671334464.1731671382744 2024-11-15T11:49:42,748 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:49:42,748 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:49:42,748 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:49:42,748 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:49:42,748 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:49:42,748 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/WALs/7adf9b3d9d04,39085,1731671334464/7adf9b3d9d04%2C39085%2C1731671334464.1731671382690 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/WALs/7adf9b3d9d04,39085,1731671334464/7adf9b3d9d04%2C39085%2C1731671334464.1731671382744 2024-11-15T11:49:42,749 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45607:45607),(127.0.0.1/127.0.0.1:38175:38175)] 2024-11-15T11:49:42,749 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/WALs/7adf9b3d9d04,39085,1731671334464/7adf9b3d9d04%2C39085%2C1731671334464.1731671382690 is not closed yet, will try archiving it next time 2024-11-15T11:49:42,749 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/WALs/7adf9b3d9d04,39085,1731671334464/7adf9b3d9d04%2C39085%2C1731671334464.1731671334986 to hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/oldWALs/7adf9b3d9d04%2C39085%2C1731671334464.1731671334986 2024-11-15T11:49:42,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741875_1051 (size=731) 2024-11-15T11:49:42,750 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T11:49:42,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741875_1051 (size=731) 2024-11-15T11:49:42,750 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/WALs/7adf9b3d9d04,39085,1731671334464/7adf9b3d9d04%2C39085%2C1731671334464.1731671382690 to hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/oldWALs/7adf9b3d9d04%2C39085%2C1731671334464.1731671382690 2024-11-15T11:49:42,851 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-15T11:49:42,851 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-15T11:49:42,851 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T11:49:42,852 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:49:42,852 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:49:42,852 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
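The call stack above is the normal close path: AbstractTestLogRolling.tearDown() calls HBaseTestingUtil.shutdownMiniCluster(), which first closes the shared async connection and then stops the cluster. A minimal sketch of that JUnit 4 lifecycle, with a hypothetical test class name and an empty test body standing in for the real test:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class MiniClusterLifecycleSketch {
  private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // Starts an in-process master, region server, ZooKeeper quorum and mini DFS.
    testUtil.startMiniCluster();
  }

  @Test
  public void testAgainstTheMiniCluster() throws Exception {
    // table creation, writes and WAL rolls would go here
  }

  @After
  public void tearDown() throws Exception {
    // Closes the shared connection (logged above as "Connection has been closed
    // by Time-limited test") and shuts down master, region servers and DFS.
    testUtil.shutdownMiniCluster();
  }
}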
2024-11-15T11:49:42,853 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-15T11:49:42,853 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1960192319, stopped=false 2024-11-15T11:49:42,854 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=7adf9b3d9d04,37705,1731671334324 2024-11-15T11:49:42,883 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37705-0x1013f9d30680000, quorum=127.0.0.1:59738, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T11:49:42,883 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1013f9d30680001, quorum=127.0.0.1:59738, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T11:49:42,883 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37705-0x1013f9d30680000, quorum=127.0.0.1:59738, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:49:42,883 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1013f9d30680001, quorum=127.0.0.1:59738, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:49:42,883 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T11:49:42,883 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-15T11:49:42,884 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37705-0x1013f9d30680000, quorum=127.0.0.1:59738, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T11:49:42,884 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at 
org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T11:49:42,884 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:49:42,884 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39085-0x1013f9d30680001, quorum=127.0.0.1:59738, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T11:49:42,884 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '7adf9b3d9d04,39085,1731671334464' ***** 2024-11-15T11:49:42,884 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-15T11:49:42,885 INFO [RS:0;7adf9b3d9d04:39085 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-15T11:49:42,885 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-15T11:49:42,886 INFO [RS:0;7adf9b3d9d04:39085 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-15T11:49:42,886 INFO [RS:0;7adf9b3d9d04:39085 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
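The ZKWatcher entries above show both the master (37705) and the region server (39085) receiving NodeDeleted for /hbase/running, which is how cluster shutdown is broadcast, and then re-setting a watch on the now-missing znode. A minimal sketch of observing that znode with the plain ZooKeeper client; the quorum address and path are taken from the log, the session timeout is an assumption:

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RunningZnodeWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch deleted = new CountDownLatch(1);
    ZooKeeper zk = new ZooKeeper("127.0.0.1:59738", 30_000, event -> { });
    // exists() both checks the znode and leaves a one-shot watch; the watch fires
    // on creation, data change or deletion. NodeDeleted on /hbase/running is the
    // event the servers react to in the entries above.
    zk.exists("/hbase/running", (WatchedEvent event) -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
        deleted.countDown();
      }
    });
    deleted.await();
    System.out.println("/hbase/running was deleted: cluster shutdown requested");
    zk.close();
  }
}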
2024-11-15T11:49:42,886 INFO [RS:0;7adf9b3d9d04:39085 {}] regionserver.HRegionServer(3091): Received CLOSE for 54d9b36c4ac742307d7d87ca14264576 2024-11-15T11:49:42,886 INFO [RS:0;7adf9b3d9d04:39085 {}] regionserver.HRegionServer(3091): Received CLOSE for 49e5023b322e0a73b13e6bdd2847ca14 2024-11-15T11:49:42,886 INFO [RS:0;7adf9b3d9d04:39085 {}] regionserver.HRegionServer(959): stopping server 7adf9b3d9d04,39085,1731671334464 2024-11-15T11:49:42,886 INFO [RS:0;7adf9b3d9d04:39085 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T11:49:42,886 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 54d9b36c4ac742307d7d87ca14264576, disabling compactions & flushes 2024-11-15T11:49:42,886 INFO [RS:0;7adf9b3d9d04:39085 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;7adf9b3d9d04:39085. 2024-11-15T11:49:42,886 INFO [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731671350546.54d9b36c4ac742307d7d87ca14264576. 2024-11-15T11:49:42,886 DEBUG [RS:0;7adf9b3d9d04:39085 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T11:49:42,886 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731671350546.54d9b36c4ac742307d7d87ca14264576. 2024-11-15T11:49:42,886 DEBUG [RS:0;7adf9b3d9d04:39085 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:49:42,886 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731671350546.54d9b36c4ac742307d7d87ca14264576. after waiting 0 ms 2024-11-15T11:49:42,886 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731671350546.54d9b36c4ac742307d7d87ca14264576. 2024-11-15T11:49:42,887 INFO [RS:0;7adf9b3d9d04:39085 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-15T11:49:42,887 INFO [RS:0;7adf9b3d9d04:39085 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-15T11:49:42,887 INFO [RS:0;7adf9b3d9d04:39085 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-15T11:49:42,887 INFO [RS:0;7adf9b3d9d04:39085 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-15T11:49:42,887 INFO [RS:0;7adf9b3d9d04:39085 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-15T11:49:42,887 DEBUG [RS:0;7adf9b3d9d04:39085 {}] regionserver.HRegionServer(1325): Online Regions={54d9b36c4ac742307d7d87ca14264576=TestLogRolling-testLogRolling,,1731671350546.54d9b36c4ac742307d7d87ca14264576., 49e5023b322e0a73b13e6bdd2847ca14=TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14., 1588230740=hbase:meta,,1.1588230740} 2024-11-15T11:49:42,887 DEBUG [RS:0;7adf9b3d9d04:39085 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 49e5023b322e0a73b13e6bdd2847ca14, 54d9b36c4ac742307d7d87ca14264576 2024-11-15T11:49:42,887 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T11:49:42,888 INFO [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T11:49:42,887 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731671350546.54d9b36c4ac742307d7d87ca14264576.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/54d9b36c4ac742307d7d87ca14264576/info/0ffb8896242d44308d52fe37cf3f67bd.eed6ab3436672cf1ecb2e52ea172410b->hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/0ffb8896242d44308d52fe37cf3f67bd-bottom] to archive 2024-11-15T11:49:42,888 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T11:49:42,888 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T11:49:42,888 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T11:49:42,889 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731671350546.54d9b36c4ac742307d7d87ca14264576.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
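The StoreCloser thread above hands the closing region's compacted store files to HFileArchiver, which relocates them under the cluster's archive directory instead of deleting them. At the filesystem level that is a rename from data/... to archive/data/...; a minimal sketch of the equivalent HDFS operation follows, with shortened hypothetical paths (HBase's own archiver additionally handles name collisions, retries and quota reporting):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveStoreFileSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:42507"); // NameNode address from the log
    try (FileSystem fs = FileSystem.get(conf)) {
      // Hypothetical file; the real layout mirrors
      //   <root>/data/<ns>/<table>/<region>/<family>/<file>
      // into
      //   <root>/archive/data/<ns>/<table>/<region>/<family>/<file>
      Path src = new Path("/hbase/data/default/SomeTable/region-x/info/storefile-1");
      Path dst = new Path("/hbase/archive/data/default/SomeTable/region-x/info/storefile-1");
      fs.mkdirs(dst.getParent());      // ensure the archive directory exists
      if (!fs.rename(src, dst)) {      // on HDFS a rename is a cheap metadata-only move
        throw new IllegalStateException("failed to archive " + src);
      }
    }
  }
}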
2024-11-15T11:49:42,891 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731671350546.54d9b36c4ac742307d7d87ca14264576.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/54d9b36c4ac742307d7d87ca14264576/info/0ffb8896242d44308d52fe37cf3f67bd.eed6ab3436672cf1ecb2e52ea172410b to hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/archive/data/default/TestLogRolling-testLogRolling/54d9b36c4ac742307d7d87ca14264576/info/0ffb8896242d44308d52fe37cf3f67bd.eed6ab3436672cf1ecb2e52ea172410b 2024-11-15T11:49:42,891 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731671350546.54d9b36c4ac742307d7d87ca14264576.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=7adf9b3d9d04:37705 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-11-15T11:49:42,891 WARN [StoreCloser-TestLogRolling-testLogRolling,,1731671350546.54d9b36c4ac742307d7d87ca14264576.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-15T11:49:42,893 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-15T11:49:42,893 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T11:49:42,893 INFO [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T11:49:42,893 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731671382887Running coprocessor pre-close hooks at 1731671382887Disabling compacts and flushes for region at 1731671382887Disabling writes for close at 1731671382888 (+1 ms)Writing region close event to WAL at 1731671382889 (+1 ms)Running coprocessor post-close hooks at 1731671382893 (+4 ms)Closed at 1731671382893 2024-11-15T11:49:42,893 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-15T11:49:42,895 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/54d9b36c4ac742307d7d87ca14264576/recovered.edits/131.seqid, newMaxSeqId=131, maxSeqId=126 2024-11-15T11:49:42,895 INFO [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731671350546.54d9b36c4ac742307d7d87ca14264576. 
2024-11-15T11:49:42,895 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 54d9b36c4ac742307d7d87ca14264576: Waiting for close lock at 1731671382886Running coprocessor pre-close hooks at 1731671382886Disabling compacts and flushes for region at 1731671382886Disabling writes for close at 1731671382886Writing region close event to WAL at 1731671382892 (+6 ms)Running coprocessor post-close hooks at 1731671382895 (+3 ms)Closed at 1731671382895 2024-11-15T11:49:42,895 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1731671350546.54d9b36c4ac742307d7d87ca14264576. 2024-11-15T11:49:42,895 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 49e5023b322e0a73b13e6bdd2847ca14, disabling compactions & flushes 2024-11-15T11:49:42,895 INFO [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14. 2024-11-15T11:49:42,895 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14. 2024-11-15T11:49:42,895 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14. after waiting 0 ms 2024-11-15T11:49:42,895 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14. 
2024-11-15T11:49:42,896 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/0ffb8896242d44308d52fe37cf3f67bd.eed6ab3436672cf1ecb2e52ea172410b->hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/eed6ab3436672cf1ecb2e52ea172410b/info/0ffb8896242d44308d52fe37cf3f67bd-top, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/TestLogRolling-testLogRolling=eed6ab3436672cf1ecb2e52ea172410b-e6f26f241d4d45eea80ebf2a2a5cc0ce, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/TestLogRolling-testLogRolling=eed6ab3436672cf1ecb2e52ea172410b-95bacbe698c64d3f96640d0e6d0ca5e4, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/d2366b5d056e41e1847295be29326367, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/TestLogRolling-testLogRolling=eed6ab3436672cf1ecb2e52ea172410b-c65888852b6547089311f91929c56cd3, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/b08d196ba7b941539b22d70dc48557c3, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/8db3a450c8cc44f7ac0e37d263b6b660, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/3a7f9e4b3bc34385bbb408fd2343a01e, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/70efe76ba0804333b947c7512adf078a, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/1bcebc8c622a4faabcf62abaa2e36857, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/0898d1f6e61a45a1bae4b9c38e524e0d, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/1f59309ec7644135aedd266539011165, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/c7bb88fc96504c8dafe8007173775bb0, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/d2aa343d54fb4999b26a8692bce3927a, 
hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/cca7c432c06341b4a62079dcb053ea75, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/6b91a27717e545ed952df49b5da4ddb5, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/8507e33c43c84dfdba17d2d06e1d2ecf, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/081b0c46f2264263896f76e0664825c4, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/197b254bcc94485aa783b6e700fb7178, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/6be5627e802c4f8a81955d2de3775efe, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/99857ba7f7a3457ab44e4e9fb63e917f, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/76a042e80cd44933833e338839f7eddb] to archive 2024-11-15T11:49:42,897 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
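The same archiving pass then runs for region 49e5023b..., moving the roughly twenty compacted and split-referenced info files listed above into the archive tree; the per-file confirmations follow below. A small hedged sketch for inspecting the result by listing everything under the table's archive directory with the HDFS client; the table path is copied from the log, everything else is illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class ListArchivedFilesSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:42507");
    try (FileSystem fs = FileSystem.get(conf)) {
      Path archivedTable = new Path(
          "/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/"
          + "archive/data/default/TestLogRolling-testLogRolling");
      // Recursively walk the archive tree and print each archived store file
      // together with its length in bytes.
      RemoteIterator<LocatedFileStatus> it = fs.listFiles(archivedTable, true);
      while (it.hasNext()) {
        LocatedFileStatus status = it.next();
        System.out.println(status.getPath() + "\t" + status.getLen() + " bytes");
      }
    }
  }
}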
2024-11-15T11:49:42,898 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/0ffb8896242d44308d52fe37cf3f67bd.eed6ab3436672cf1ecb2e52ea172410b to hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/archive/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/0ffb8896242d44308d52fe37cf3f67bd.eed6ab3436672cf1ecb2e52ea172410b 2024-11-15T11:49:42,899 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/TestLogRolling-testLogRolling=eed6ab3436672cf1ecb2e52ea172410b-e6f26f241d4d45eea80ebf2a2a5cc0ce to hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/archive/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/TestLogRolling-testLogRolling=eed6ab3436672cf1ecb2e52ea172410b-e6f26f241d4d45eea80ebf2a2a5cc0ce 2024-11-15T11:49:42,900 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/TestLogRolling-testLogRolling=eed6ab3436672cf1ecb2e52ea172410b-95bacbe698c64d3f96640d0e6d0ca5e4 to hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/archive/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/TestLogRolling-testLogRolling=eed6ab3436672cf1ecb2e52ea172410b-95bacbe698c64d3f96640d0e6d0ca5e4 2024-11-15T11:49:42,901 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/d2366b5d056e41e1847295be29326367 to hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/archive/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/d2366b5d056e41e1847295be29326367 2024-11-15T11:49:42,903 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/TestLogRolling-testLogRolling=eed6ab3436672cf1ecb2e52ea172410b-c65888852b6547089311f91929c56cd3 to hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/archive/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/TestLogRolling-testLogRolling=eed6ab3436672cf1ecb2e52ea172410b-c65888852b6547089311f91929c56cd3 2024-11-15T11:49:42,904 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/b08d196ba7b941539b22d70dc48557c3 to hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/archive/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/b08d196ba7b941539b22d70dc48557c3 2024-11-15T11:49:42,904 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/8db3a450c8cc44f7ac0e37d263b6b660 to hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/archive/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/8db3a450c8cc44f7ac0e37d263b6b660 2024-11-15T11:49:42,905 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/3a7f9e4b3bc34385bbb408fd2343a01e to hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/archive/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/3a7f9e4b3bc34385bbb408fd2343a01e 2024-11-15T11:49:42,906 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/70efe76ba0804333b947c7512adf078a to hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/archive/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/70efe76ba0804333b947c7512adf078a 2024-11-15T11:49:42,907 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/1bcebc8c622a4faabcf62abaa2e36857 to hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/archive/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/1bcebc8c622a4faabcf62abaa2e36857 2024-11-15T11:49:42,908 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/0898d1f6e61a45a1bae4b9c38e524e0d to 
hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/archive/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/0898d1f6e61a45a1bae4b9c38e524e0d 2024-11-15T11:49:42,909 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/1f59309ec7644135aedd266539011165 to hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/archive/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/1f59309ec7644135aedd266539011165 2024-11-15T11:49:42,910 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/c7bb88fc96504c8dafe8007173775bb0 to hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/archive/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/c7bb88fc96504c8dafe8007173775bb0 2024-11-15T11:49:42,911 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/d2aa343d54fb4999b26a8692bce3927a to hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/archive/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/d2aa343d54fb4999b26a8692bce3927a 2024-11-15T11:49:42,912 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/cca7c432c06341b4a62079dcb053ea75 to hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/archive/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/cca7c432c06341b4a62079dcb053ea75 2024-11-15T11:49:42,913 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/6b91a27717e545ed952df49b5da4ddb5 to hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/archive/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/6b91a27717e545ed952df49b5da4ddb5 2024-11-15T11:49:42,914 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/8507e33c43c84dfdba17d2d06e1d2ecf to hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/archive/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/8507e33c43c84dfdba17d2d06e1d2ecf 2024-11-15T11:49:42,915 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/081b0c46f2264263896f76e0664825c4 to hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/archive/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/081b0c46f2264263896f76e0664825c4 2024-11-15T11:49:42,916 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/197b254bcc94485aa783b6e700fb7178 to hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/archive/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/197b254bcc94485aa783b6e700fb7178 2024-11-15T11:49:42,916 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/6be5627e802c4f8a81955d2de3775efe to hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/archive/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/6be5627e802c4f8a81955d2de3775efe 2024-11-15T11:49:42,917 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/99857ba7f7a3457ab44e4e9fb63e917f to hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/archive/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/99857ba7f7a3457ab44e4e9fb63e917f 2024-11-15T11:49:42,918 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/76a042e80cd44933833e338839f7eddb to hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/archive/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/info/76a042e80cd44933833e338839f7eddb 2024-11-15T11:49:42,918 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14.-1 {}] 
regionserver.HStore(2414): Failed to report archival of files: [d2366b5d056e41e1847295be29326367=43081, b08d196ba7b941539b22d70dc48557c3=12516, 8db3a450c8cc44f7ac0e37d263b6b660=64811, 3a7f9e4b3bc34385bbb408fd2343a01e=19000, 70efe76ba0804333b947c7512adf078a=19000, 1bcebc8c622a4faabcf62abaa2e36857=90862, 0898d1f6e61a45a1bae4b9c38e524e0d=16828, 1f59309ec7644135aedd266539011165=21156, c7bb88fc96504c8dafe8007173775bb0=114684, d2aa343d54fb4999b26a8692bce3927a=12516, cca7c432c06341b4a62079dcb053ea75=16828, 6b91a27717e545ed952df49b5da4ddb5=147331, 8507e33c43c84dfdba17d2d06e1d2ecf=25472, 081b0c46f2264263896f76e0664825c4=12521, 197b254bcc94485aa783b6e700fb7178=169015, 6be5627e802c4f8a81955d2de3775efe=19013, 99857ba7f7a3457ab44e4e9fb63e917f=16839, 76a042e80cd44933833e338839f7eddb=12523] 2024-11-15T11:49:42,921 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/data/default/TestLogRolling-testLogRolling/49e5023b322e0a73b13e6bdd2847ca14/recovered.edits/338.seqid, newMaxSeqId=338, maxSeqId=126 2024-11-15T11:49:42,922 INFO [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14. 2024-11-15T11:49:42,922 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 49e5023b322e0a73b13e6bdd2847ca14: Waiting for close lock at 1731671382895Running coprocessor pre-close hooks at 1731671382895Disabling compacts and flushes for region at 1731671382895Disabling writes for close at 1731671382895Writing region close event to WAL at 1731671382919 (+24 ms)Running coprocessor post-close hooks at 1731671382922 (+3 ms)Closed at 1731671382922 2024-11-15T11:49:42,922 DEBUG [RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1731671350546.49e5023b322e0a73b13e6bdd2847ca14. 2024-11-15T11:49:43,088 INFO [RS:0;7adf9b3d9d04:39085 {}] regionserver.HRegionServer(976): stopping server 7adf9b3d9d04,39085,1731671334464; all regions closed. 
2024-11-15T11:49:43,088 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:49:43,088 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:49:43,088 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:49:43,088 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:49:43,089 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:49:43,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741834_1010 (size=8107) 2024-11-15T11:49:43,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741834_1010 (size=8107) 2024-11-15T11:49:43,092 DEBUG [RS:0;7adf9b3d9d04:39085 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/oldWALs 2024-11-15T11:49:43,092 INFO [RS:0;7adf9b3d9d04:39085 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7adf9b3d9d04%2C39085%2C1731671334464.meta:.meta(num 1731671335660) 2024-11-15T11:49:43,093 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:49:43,093 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:49:43,093 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:49:43,093 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:49:43,093 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:49:43,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741878_1054 (size=780) 2024-11-15T11:49:43,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741878_1054 (size=780) 2024-11-15T11:49:43,096 DEBUG [RS:0;7adf9b3d9d04:39085 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/oldWALs 2024-11-15T11:49:43,096 INFO [RS:0;7adf9b3d9d04:39085 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7adf9b3d9d04%2C39085%2C1731671334464:(num 1731671382744) 2024-11-15T11:49:43,096 DEBUG [RS:0;7adf9b3d9d04:39085 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:49:43,096 INFO [RS:0;7adf9b3d9d04:39085 {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T11:49:43,096 INFO [RS:0;7adf9b3d9d04:39085 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T11:49:43,097 INFO [RS:0;7adf9b3d9d04:39085 {}] hbase.ChoreService(370): Chore service for: regionserver/7adf9b3d9d04:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-15T11:49:43,097 INFO [RS:0;7adf9b3d9d04:39085 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T11:49:43,097 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
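The ChoreService summary above lists the periodic chores (replication statistics, compaction throughput tuning) that were still scheduled when the region server shut down. As a rough analogy only, not HBase's own ScheduledChore API, the pattern is a fixed-rate task on a scheduler that must be drained at shutdown; a minimal sketch with java.util.concurrent:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class PeriodicChoreSketch {
  public static void main(String[] args) throws Exception {
    ScheduledExecutorService chorePool = Executors.newSingleThreadScheduledExecutor();
    // Stand-in for a chore such as CompactionThroughputTuner: runs every 60 seconds
    // until the pool is shut down.
    chorePool.scheduleAtFixedRate(
        () -> System.out.println("tuning compaction throughput (placeholder work)"),
        0, 60, TimeUnit.SECONDS);

    // ... server runs ...
    Thread.sleep(1_000);

    // On shutdown, stop scheduling new runs and wait briefly for in-flight work,
    // mirroring the "Shutdown chores and chore service" step in the log above.
    chorePool.shutdown();
    if (!chorePool.awaitTermination(5, TimeUnit.SECONDS)) {
      chorePool.shutdownNow();
    }
  }
}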
2024-11-15T11:49:43,097 INFO [RS:0;7adf9b3d9d04:39085 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39085 2024-11-15T11:49:43,124 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37705-0x1013f9d30680000, quorum=127.0.0.1:59738, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T11:49:43,124 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1013f9d30680001, quorum=127.0.0.1:59738, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7adf9b3d9d04,39085,1731671334464 2024-11-15T11:49:43,124 INFO [RS:0;7adf9b3d9d04:39085 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T11:49:43,124 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7adf9b3d9d04,39085,1731671334464] 2024-11-15T11:49:43,140 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/7adf9b3d9d04,39085,1731671334464 already deleted, retry=false 2024-11-15T11:49:43,141 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 7adf9b3d9d04,39085,1731671334464 expired; onlineServers=0 2024-11-15T11:49:43,141 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '7adf9b3d9d04,37705,1731671334324' ***** 2024-11-15T11:49:43,141 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-15T11:49:43,141 INFO [M:0;7adf9b3d9d04:37705 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T11:49:43,141 INFO [M:0;7adf9b3d9d04:37705 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T11:49:43,141 DEBUG [M:0;7adf9b3d9d04:37705 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-15T11:49:43,141 DEBUG [M:0;7adf9b3d9d04:37705 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-15T11:49:43,141 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
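The master learns that the region server is gone because the server's znode under /hbase/rs is ephemeral: closing the ZooKeeper session deletes it, the master's watch on /hbase/rs fires, and RegionServerTracker processes the expiration, as logged above. A minimal sketch of that mechanism with the plain ZooKeeper client; a neutral /rs-sketch path stands in for /hbase/rs, and the payload and timeout are assumptions:

import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class EphemeralServerNodeSketch {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:59738", 30_000, event -> { });

    // Parent must exist before an ephemeral child can be registered under it.
    if (zk.exists("/rs-sketch", false) == null) {
      zk.create("/rs-sketch", new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
    }

    // Register a "region server" as an ephemeral child. Ephemeral znodes live
    // only as long as the session that created them.
    String path = zk.create("/rs-sketch/server-12345", new byte[0],
        ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
    System.out.println("registered " + path);

    // Closing the session deletes the ephemeral node, which is what produces the
    // NodeDeleted / NodeChildrenChanged events and the expiration handling above.
    zk.close();
  }
}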
2024-11-15T11:49:43,141 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster-HFileCleaner.small.0-1731671334805 {}] cleaner.HFileCleaner(306): Exit Thread[master/7adf9b3d9d04:0:becomeActiveMaster-HFileCleaner.small.0-1731671334805,5,FailOnTimeoutGroup] 2024-11-15T11:49:43,141 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster-HFileCleaner.large.0-1731671334805 {}] cleaner.HFileCleaner(306): Exit Thread[master/7adf9b3d9d04:0:becomeActiveMaster-HFileCleaner.large.0-1731671334805,5,FailOnTimeoutGroup] 2024-11-15T11:49:43,141 INFO [M:0;7adf9b3d9d04:37705 {}] hbase.ChoreService(370): Chore service for: master/7adf9b3d9d04:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-15T11:49:43,141 INFO [M:0;7adf9b3d9d04:37705 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T11:49:43,141 DEBUG [M:0;7adf9b3d9d04:37705 {}] master.HMaster(1795): Stopping service threads 2024-11-15T11:49:43,141 INFO [M:0;7adf9b3d9d04:37705 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-15T11:49:43,141 INFO [M:0;7adf9b3d9d04:37705 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T11:49:43,142 INFO [M:0;7adf9b3d9d04:37705 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-15T11:49:43,142 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-15T11:49:43,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37705-0x1013f9d30680000, quorum=127.0.0.1:59738, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-15T11:49:43,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37705-0x1013f9d30680000, quorum=127.0.0.1:59738, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:49:43,149 DEBUG [M:0;7adf9b3d9d04:37705 {}] zookeeper.ZKUtil(347): master:37705-0x1013f9d30680000, quorum=127.0.0.1:59738, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-15T11:49:43,149 WARN [M:0;7adf9b3d9d04:37705 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-15T11:49:43,150 INFO [M:0;7adf9b3d9d04:37705 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/.lastflushedseqids 2024-11-15T11:49:43,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741879_1055 (size=228) 2024-11-15T11:49:43,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741879_1055 (size=228) 2024-11-15T11:49:43,157 INFO [M:0;7adf9b3d9d04:37705 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-15T11:49:43,157 INFO [M:0;7adf9b3d9d04:37705 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-15T11:49:43,157 DEBUG [M:0;7adf9b3d9d04:37705 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T11:49:43,157 INFO [M:0;7adf9b3d9d04:37705 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:49:43,157 DEBUG [M:0;7adf9b3d9d04:37705 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:49:43,157 DEBUG [M:0;7adf9b3d9d04:37705 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T11:49:43,157 DEBUG [M:0;7adf9b3d9d04:37705 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:49:43,158 INFO [M:0;7adf9b3d9d04:37705 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.41 KB heapSize=63.33 KB 2024-11-15T11:49:43,177 DEBUG [M:0;7adf9b3d9d04:37705 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1426f852f08446698bb389757ccc446a is 82, key is hbase:meta,,1/info:regioninfo/1731671335699/Put/seqid=0 2024-11-15T11:49:43,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741880_1056 (size=5672) 2024-11-15T11:49:43,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741880_1056 (size=5672) 2024-11-15T11:49:43,182 INFO [M:0;7adf9b3d9d04:37705 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1426f852f08446698bb389757ccc446a 2024-11-15T11:49:43,200 DEBUG [M:0;7adf9b3d9d04:37705 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7145af1e4c654763933377003f7ad83b is 749, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731671336182/Put/seqid=0 2024-11-15T11:49:43,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741881_1057 (size=7089) 2024-11-15T11:49:43,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741881_1057 (size=7089) 2024-11-15T11:49:43,205 INFO [M:0;7adf9b3d9d04:37705 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.80 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7145af1e4c654763933377003f7ad83b 2024-11-15T11:49:43,209 INFO [M:0;7adf9b3d9d04:37705 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 7145af1e4c654763933377003f7ad83b 2024-11-15T11:49:43,226 DEBUG [M:0;7adf9b3d9d04:37705 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f5eac60e9d4d4eeaad37539fdd2f0d43 is 69, key is 7adf9b3d9d04,39085,1731671334464/rs:state/1731671334830/Put/seqid=0 
2024-11-15T11:49:43,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741882_1058 (size=5156) 2024-11-15T11:49:43,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741882_1058 (size=5156) 2024-11-15T11:49:43,231 INFO [M:0;7adf9b3d9d04:37705 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f5eac60e9d4d4eeaad37539fdd2f0d43 2024-11-15T11:49:43,232 INFO [RS:0;7adf9b3d9d04:39085 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T11:49:43,232 INFO [RS:0;7adf9b3d9d04:39085 {}] regionserver.HRegionServer(1031): Exiting; stopping=7adf9b3d9d04,39085,1731671334464; zookeeper connection closed. 2024-11-15T11:49:43,232 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1013f9d30680001, quorum=127.0.0.1:59738, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T11:49:43,232 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1013f9d30680001, quorum=127.0.0.1:59738, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T11:49:43,233 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@556e03b6 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@556e03b6 2024-11-15T11:49:43,233 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-15T11:49:43,248 DEBUG [M:0;7adf9b3d9d04:37705 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e677c464c4eb4b04bf66a9e1940670dd is 52, key is load_balancer_on/state:d/1731671335803/Put/seqid=0 2024-11-15T11:49:43,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741883_1059 (size=5056) 2024-11-15T11:49:43,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741883_1059 (size=5056) 2024-11-15T11:49:43,253 INFO [M:0;7adf9b3d9d04:37705 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e677c464c4eb4b04bf66a9e1940670dd 2024-11-15T11:49:43,258 DEBUG [M:0;7adf9b3d9d04:37705 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1426f852f08446698bb389757ccc446a as hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1426f852f08446698bb389757ccc446a 2024-11-15T11:49:43,262 INFO [M:0;7adf9b3d9d04:37705 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1426f852f08446698bb389757ccc446a, entries=8, sequenceid=125, filesize=5.5 K 2024-11-15T11:49:43,263 DEBUG [M:0;7adf9b3d9d04:37705 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7145af1e4c654763933377003f7ad83b as hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/7145af1e4c654763933377003f7ad83b 2024-11-15T11:49:43,268 INFO [M:0;7adf9b3d9d04:37705 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 7145af1e4c654763933377003f7ad83b 2024-11-15T11:49:43,268 INFO [M:0;7adf9b3d9d04:37705 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/7145af1e4c654763933377003f7ad83b, entries=13, sequenceid=125, filesize=6.9 K 2024-11-15T11:49:43,269 DEBUG [M:0;7adf9b3d9d04:37705 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f5eac60e9d4d4eeaad37539fdd2f0d43 as hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f5eac60e9d4d4eeaad37539fdd2f0d43 2024-11-15T11:49:43,273 INFO [M:0;7adf9b3d9d04:37705 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f5eac60e9d4d4eeaad37539fdd2f0d43, entries=1, sequenceid=125, filesize=5.0 K 2024-11-15T11:49:43,274 DEBUG [M:0;7adf9b3d9d04:37705 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e677c464c4eb4b04bf66a9e1940670dd as hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/e677c464c4eb4b04bf66a9e1940670dd 2024-11-15T11:49:43,278 INFO [M:0;7adf9b3d9d04:37705 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42507/user/jenkins/test-data/2a376162-fc08-a2ea-380f-649549026e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/e677c464c4eb4b04bf66a9e1940670dd, entries=1, sequenceid=125, filesize=4.9 K 2024-11-15T11:49:43,279 INFO [M:0;7adf9b3d9d04:37705 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.41 KB/52639, heapSize ~63.27 KB/64784, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 122ms, sequenceid=125, compaction requested=false 2024-11-15T11:49:43,281 INFO [M:0;7adf9b3d9d04:37705 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-15T11:49:43,281 DEBUG [M:0;7adf9b3d9d04:37705 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731671383157Disabling compacts and flushes for region at 1731671383157Disabling writes for close at 1731671383157Obtaining lock to block concurrent updates at 1731671383158 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731671383158Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52639, getHeapSize=64784, getOffHeapSize=0, getCellsCount=148 at 1731671383158Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731671383159 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731671383159Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731671383176 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731671383176Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731671383186 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731671383199 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731671383199Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731671383209 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731671383226 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731671383226Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731671383235 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731671383248 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731671383248Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4b80d04a: reopening flushed file at 1731671383257 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3b04d62e: reopening flushed file at 1731671383262 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2d8ab12f: reopening flushed file at 1731671383268 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@411cf3db: reopening flushed file at 1731671383273 (+5 ms)Finished flush of dataSize ~51.41 KB/52639, heapSize ~63.27 KB/64784, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 122ms, sequenceid=125, compaction requested=false at 1731671383279 (+6 ms)Writing region close event to WAL at 1731671383281 (+2 ms)Closed at 1731671383281 2024-11-15T11:49:43,281 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:49:43,281 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:49:43,281 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:49:43,281 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:49:43,281 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:49:43,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741830_1006 (size=61308) 2024-11-15T11:49:43,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35265 is added to blk_1073741830_1006 (size=61308) 2024-11-15T11:49:43,284 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-15T11:49:43,284 INFO [M:0;7adf9b3d9d04:37705 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-15T11:49:43,284 INFO [M:0;7adf9b3d9d04:37705 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37705 2024-11-15T11:49:43,284 INFO [M:0;7adf9b3d9d04:37705 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T11:49:43,414 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:49:43,414 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:49:43,416 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37705-0x1013f9d30680000, quorum=127.0.0.1:59738, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T11:49:43,416 INFO [M:0;7adf9b3d9d04:37705 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T11:49:43,416 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37705-0x1013f9d30680000, quorum=127.0.0.1:59738, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T11:49:43,418 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@42c5c09{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T11:49:43,418 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5ce0a24{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T11:49:43,419 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T11:49:43,419 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2f61588{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T11:49:43,419 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@13ef5561{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/faa31688-46b6-1f16-abaa-39f06c167d2c/hadoop.log.dir/,STOPPED} 2024-11-15T11:49:43,420 WARN [BP-31277891-172.17.0.2-1731671332645 heartbeating to 
localhost/127.0.0.1:42507 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T11:49:43,420 WARN [BP-31277891-172.17.0.2-1731671332645 heartbeating to localhost/127.0.0.1:42507 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-31277891-172.17.0.2-1731671332645 (Datanode Uuid 474fe460-4e8f-441d-8d17-66401fec762f) service to localhost/127.0.0.1:42507 2024-11-15T11:49:43,420 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-15T11:49:43,421 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T11:49:43,421 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/faa31688-46b6-1f16-abaa-39f06c167d2c/cluster_926d09c0-7fc5-146a-3776-532c837bb4dc/data/data3/current/BP-31277891-172.17.0.2-1731671332645 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T11:49:43,421 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/faa31688-46b6-1f16-abaa-39f06c167d2c/cluster_926d09c0-7fc5-146a-3776-532c837bb4dc/data/data4/current/BP-31277891-172.17.0.2-1731671332645 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T11:49:43,421 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T11:49:43,424 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4a6e8e46{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T11:49:43,424 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1204fb24{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T11:49:43,424 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T11:49:43,424 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7f424370{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T11:49:43,425 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2eb912ab{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/faa31688-46b6-1f16-abaa-39f06c167d2c/hadoop.log.dir/,STOPPED} 2024-11-15T11:49:43,426 WARN [BP-31277891-172.17.0.2-1731671332645 heartbeating to localhost/127.0.0.1:42507 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T11:49:43,426 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-15T11:49:43,426 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T11:49:43,426 WARN [BP-31277891-172.17.0.2-1731671332645 heartbeating to localhost/127.0.0.1:42507 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-31277891-172.17.0.2-1731671332645 (Datanode Uuid c43d53ca-408b-499f-9498-c8f88ac4ca2d) service to localhost/127.0.0.1:42507 2024-11-15T11:49:43,427 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/faa31688-46b6-1f16-abaa-39f06c167d2c/cluster_926d09c0-7fc5-146a-3776-532c837bb4dc/data/data1/current/BP-31277891-172.17.0.2-1731671332645 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T11:49:43,427 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/faa31688-46b6-1f16-abaa-39f06c167d2c/cluster_926d09c0-7fc5-146a-3776-532c837bb4dc/data/data2/current/BP-31277891-172.17.0.2-1731671332645 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T11:49:43,427 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T11:49:43,432 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@64d2170c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T11:49:43,433 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@551592b1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T11:49:43,433 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T11:49:43,433 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@54fcac{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T11:49:43,433 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@671b15e6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/faa31688-46b6-1f16-abaa-39f06c167d2c/hadoop.log.dir/,STOPPED} 2024-11-15T11:49:43,441 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-15T11:49:43,467 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-15T11:49:43,474 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=232 (was 205) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42507 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42507 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42507 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42507 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42507 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:42507 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42507 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42507 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:42507 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42507 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=518 (was 485) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=202 (was 254), ProcessCount=11 (was 11), AvailableMemoryMB=10147 (was 10337) 2024-11-15T11:49:43,481 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=232, OpenFileDescriptor=518, MaxFileDescriptor=1048576, SystemLoadAverage=202, ProcessCount=11, AvailableMemoryMB=10147 2024-11-15T11:49:43,481 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-15T11:49:43,481 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/faa31688-46b6-1f16-abaa-39f06c167d2c/hadoop.log.dir so I do NOT create it in target/test-data/a6a2df6f-4f5c-ff76-618a-5a44beaf2c45 2024-11-15T11:49:43,481 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/faa31688-46b6-1f16-abaa-39f06c167d2c/hadoop.tmp.dir so I do NOT create it in target/test-data/a6a2df6f-4f5c-ff76-618a-5a44beaf2c45 2024-11-15T11:49:43,482 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6a2df6f-4f5c-ff76-618a-5a44beaf2c45/cluster_82e1db33-2e1d-f954-c3c3-8d38560d1295, deleteOnExit=true 2024-11-15T11:49:43,482 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-15T11:49:43,482 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6a2df6f-4f5c-ff76-618a-5a44beaf2c45/test.cache.data in system properties and HBase conf 2024-11-15T11:49:43,482 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6a2df6f-4f5c-ff76-618a-5a44beaf2c45/hadoop.tmp.dir in system properties and HBase conf 2024-11-15T11:49:43,482 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6a2df6f-4f5c-ff76-618a-5a44beaf2c45/hadoop.log.dir in system properties and HBase conf 2024-11-15T11:49:43,482 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6a2df6f-4f5c-ff76-618a-5a44beaf2c45/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-15T11:49:43,482 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6a2df6f-4f5c-ff76-618a-5a44beaf2c45/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-15T11:49:43,482 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-15T11:49:43,482 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-15T11:49:43,482 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6a2df6f-4f5c-ff76-618a-5a44beaf2c45/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-15T11:49:43,483 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6a2df6f-4f5c-ff76-618a-5a44beaf2c45/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-15T11:49:43,483 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6a2df6f-4f5c-ff76-618a-5a44beaf2c45/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-15T11:49:43,483 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6a2df6f-4f5c-ff76-618a-5a44beaf2c45/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T11:49:43,483 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6a2df6f-4f5c-ff76-618a-5a44beaf2c45/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-15T11:49:43,483 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6a2df6f-4f5c-ff76-618a-5a44beaf2c45/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-15T11:49:43,483 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6a2df6f-4f5c-ff76-618a-5a44beaf2c45/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T11:49:43,483 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6a2df6f-4f5c-ff76-618a-5a44beaf2c45/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T11:49:43,483 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6a2df6f-4f5c-ff76-618a-5a44beaf2c45/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-15T11:49:43,483 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6a2df6f-4f5c-ff76-618a-5a44beaf2c45/nfs.dump.dir in system properties and HBase conf 2024-11-15T11:49:43,483 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6a2df6f-4f5c-ff76-618a-5a44beaf2c45/java.io.tmpdir in system properties and HBase conf 2024-11-15T11:49:43,483 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6a2df6f-4f5c-ff76-618a-5a44beaf2c45/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T11:49:43,483 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6a2df6f-4f5c-ff76-618a-5a44beaf2c45/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-15T11:49:43,483 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6a2df6f-4f5c-ff76-618a-5a44beaf2c45/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-15T11:49:43,494 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-15T11:49:43,738 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T11:49:43,740 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T11:49:43,741 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T11:49:43,742 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T11:49:43,742 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T11:49:43,743 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T11:49:43,746 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@b72d363{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6a2df6f-4f5c-ff76-618a-5a44beaf2c45/hadoop.log.dir/,AVAILABLE} 2024-11-15T11:49:43,746 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4c66b7d1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T11:49:43,836 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@61edd007{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6a2df6f-4f5c-ff76-618a-5a44beaf2c45/java.io.tmpdir/jetty-localhost-38005-hadoop-hdfs-3_4_1-tests_jar-_-any-771087941551696061/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T11:49:43,836 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7d664f93{HTTP/1.1, (http/1.1)}{localhost:38005} 2024-11-15T11:49:43,836 INFO [Time-limited test {}] server.Server(415): Started @299590ms 2024-11-15T11:49:43,846 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-15T11:49:44,018 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T11:49:44,021 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T11:49:44,021 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T11:49:44,021 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T11:49:44,022 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T11:49:44,022 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fcd61c6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6a2df6f-4f5c-ff76-618a-5a44beaf2c45/hadoop.log.dir/,AVAILABLE} 2024-11-15T11:49:44,022 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1040cecb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T11:49:44,122 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@9fc2daa{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6a2df6f-4f5c-ff76-618a-5a44beaf2c45/java.io.tmpdir/jetty-localhost-45063-hadoop-hdfs-3_4_1-tests_jar-_-any-9579790612212038838/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T11:49:44,122 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2a1af98b{HTTP/1.1, (http/1.1)}{localhost:45063} 2024-11-15T11:49:44,122 INFO [Time-limited test {}] server.Server(415): Started @299876ms 2024-11-15T11:49:44,123 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T11:49:44,146 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-15T11:49:44,149 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-15T11:49:44,149 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-15T11:49:44,149 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-15T11:49:44,149 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-15T11:49:44,150 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@49e6dd92{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6a2df6f-4f5c-ff76-618a-5a44beaf2c45/hadoop.log.dir/,AVAILABLE}
2024-11-15T11:49:44,150 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6bfe0bbd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-15T11:49:44,243 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@627a202d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6a2df6f-4f5c-ff76-618a-5a44beaf2c45/java.io.tmpdir/jetty-localhost-37481-hadoop-hdfs-3_4_1-tests_jar-_-any-14905516212353068332/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-15T11:49:44,244 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22ed154c{HTTP/1.1, (http/1.1)}{localhost:37481}
2024-11-15T11:49:44,244 INFO [Time-limited test {}] server.Server(415): Started @299998ms
2024-11-15T11:49:44,244 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-15T11:49:44,414 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-15T11:49:44,414 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ...
11 more 2024-11-15T11:49:44,802 WARN [Thread-2498 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6a2df6f-4f5c-ff76-618a-5a44beaf2c45/cluster_82e1db33-2e1d-f954-c3c3-8d38560d1295/data/data1/current/BP-493428621-172.17.0.2-1731671383497/current, will proceed with Du for space computation calculation, 2024-11-15T11:49:44,802 WARN [Thread-2499 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6a2df6f-4f5c-ff76-618a-5a44beaf2c45/cluster_82e1db33-2e1d-f954-c3c3-8d38560d1295/data/data2/current/BP-493428621-172.17.0.2-1731671383497/current, will proceed with Du for space computation calculation, 2024-11-15T11:49:44,825 WARN [Thread-2462 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-15T11:49:44,827 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8e9e52108dbdf64d with lease ID 0x20ccad7cc52e6ad8: Processing first storage report for DS-ca81185e-0a2f-46ea-ba07-92abdccbe8b0 from datanode DatanodeRegistration(127.0.0.1:36089, datanodeUuid=4669be31-4f25-40c6-a36f-8d4494be7cd8, infoPort=43675, infoSecurePort=0, ipcPort=39115, storageInfo=lv=-57;cid=testClusterID;nsid=1689769808;c=1731671383497) 2024-11-15T11:49:44,827 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8e9e52108dbdf64d with lease ID 0x20ccad7cc52e6ad8: from storage DS-ca81185e-0a2f-46ea-ba07-92abdccbe8b0 node DatanodeRegistration(127.0.0.1:36089, datanodeUuid=4669be31-4f25-40c6-a36f-8d4494be7cd8, infoPort=43675, infoSecurePort=0, ipcPort=39115, storageInfo=lv=-57;cid=testClusterID;nsid=1689769808;c=1731671383497), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T11:49:44,827 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8e9e52108dbdf64d with lease ID 0x20ccad7cc52e6ad8: Processing first storage report for DS-1b181edf-1aaf-4492-875f-c3c967e5e9d8 from datanode DatanodeRegistration(127.0.0.1:36089, datanodeUuid=4669be31-4f25-40c6-a36f-8d4494be7cd8, infoPort=43675, infoSecurePort=0, ipcPort=39115, storageInfo=lv=-57;cid=testClusterID;nsid=1689769808;c=1731671383497) 2024-11-15T11:49:44,827 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8e9e52108dbdf64d with lease ID 0x20ccad7cc52e6ad8: from storage DS-1b181edf-1aaf-4492-875f-c3c967e5e9d8 node DatanodeRegistration(127.0.0.1:36089, datanodeUuid=4669be31-4f25-40c6-a36f-8d4494be7cd8, infoPort=43675, infoSecurePort=0, ipcPort=39115, storageInfo=lv=-57;cid=testClusterID;nsid=1689769808;c=1731671383497), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T11:49:44,854 INFO [regionserver/7adf9b3d9d04:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T11:49:44,975 WARN [Thread-2509 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6a2df6f-4f5c-ff76-618a-5a44beaf2c45/cluster_82e1db33-2e1d-f954-c3c3-8d38560d1295/data/data3/current/BP-493428621-172.17.0.2-1731671383497/current, will proceed with Du for space computation calculation, 2024-11-15T11:49:44,975 WARN [Thread-2510 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6a2df6f-4f5c-ff76-618a-5a44beaf2c45/cluster_82e1db33-2e1d-f954-c3c3-8d38560d1295/data/data4/current/BP-493428621-172.17.0.2-1731671383497/current, will proceed with Du for space computation calculation, 2024-11-15T11:49:45,003 WARN [Thread-2485 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-15T11:49:45,005 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1c26e6f3e91ab4ba with lease ID 0x20ccad7cc52e6ad9: Processing first storage report for DS-8106c5ab-24e1-44a7-b4ba-c9320a974671 from datanode DatanodeRegistration(127.0.0.1:46247, datanodeUuid=474af7d4-12d0-4d6b-b8e4-3c455c4cd94b, infoPort=35833, infoSecurePort=0, ipcPort=36863, storageInfo=lv=-57;cid=testClusterID;nsid=1689769808;c=1731671383497) 2024-11-15T11:49:45,005 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1c26e6f3e91ab4ba with lease ID 0x20ccad7cc52e6ad9: from storage DS-8106c5ab-24e1-44a7-b4ba-c9320a974671 node DatanodeRegistration(127.0.0.1:46247, datanodeUuid=474af7d4-12d0-4d6b-b8e4-3c455c4cd94b, infoPort=35833, infoSecurePort=0, ipcPort=36863, storageInfo=lv=-57;cid=testClusterID;nsid=1689769808;c=1731671383497), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-15T11:49:45,005 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1c26e6f3e91ab4ba with lease ID 0x20ccad7cc52e6ad9: Processing first storage report for DS-f03b20f8-543b-4de3-b4f7-d519b5ce1151 from datanode DatanodeRegistration(127.0.0.1:46247, datanodeUuid=474af7d4-12d0-4d6b-b8e4-3c455c4cd94b, infoPort=35833, infoSecurePort=0, ipcPort=36863, storageInfo=lv=-57;cid=testClusterID;nsid=1689769808;c=1731671383497) 2024-11-15T11:49:45,005 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1c26e6f3e91ab4ba with lease ID 0x20ccad7cc52e6ad9: from storage DS-f03b20f8-543b-4de3-b4f7-d519b5ce1151 node DatanodeRegistration(127.0.0.1:46247, datanodeUuid=474af7d4-12d0-4d6b-b8e4-3c455c4cd94b, infoPort=35833, infoSecurePort=0, ipcPort=36863, storageInfo=lv=-57;cid=testClusterID;nsid=1689769808;c=1731671383497), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T11:49:45,068 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6a2df6f-4f5c-ff76-618a-5a44beaf2c45 2024-11-15T11:49:45,070 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6a2df6f-4f5c-ff76-618a-5a44beaf2c45/cluster_82e1db33-2e1d-f954-c3c3-8d38560d1295/zookeeper_0, clientPort=60542, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6a2df6f-4f5c-ff76-618a-5a44beaf2c45/cluster_82e1db33-2e1d-f954-c3c3-8d38560d1295/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6a2df6f-4f5c-ff76-618a-5a44beaf2c45/cluster_82e1db33-2e1d-f954-c3c3-8d38560d1295/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-15T11:49:45,071 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=60542 2024-11-15T11:49:45,071 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:49:45,072 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:49:45,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36089 is added to blk_1073741825_1001 (size=7) 2024-11-15T11:49:45,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46247 is added to blk_1073741825_1001 (size=7) 2024-11-15T11:49:45,081 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102 with version=8 2024-11-15T11:49:45,081 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:37621/user/jenkins/test-data/d05e1823-15e2-8921-71f4-a2abcaa5a3ae/hbase-staging 2024-11-15T11:49:45,083 INFO [Time-limited test {}] client.ConnectionUtils(128): master/7adf9b3d9d04:0 server-side Connection retries=45 2024-11-15T11:49:45,083 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T11:49:45,083 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T11:49:45,083 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T11:49:45,083 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T11:49:45,083 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T11:49:45,083 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-15T11:49:45,083 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T11:49:45,084 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45035 2024-11-15T11:49:45,085 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45035 connecting to ZooKeeper ensemble=127.0.0.1:60542 2024-11-15T11:49:45,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:450350x0, quorum=127.0.0.1:60542, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null 2024-11-15T11:49:45,137 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45035-0x1013f9df6b30000 connected 2024-11-15T11:49:45,198 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:49:45,200 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:49:45,203 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45035-0x1013f9df6b30000, quorum=127.0.0.1:60542, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T11:49:45,203 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102, hbase.cluster.distributed=false 2024-11-15T11:49:45,206 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45035-0x1013f9df6b30000, quorum=127.0.0.1:60542, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T11:49:45,206 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45035 2024-11-15T11:49:45,206 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45035 2024-11-15T11:49:45,207 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45035 2024-11-15T11:49:45,207 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45035 2024-11-15T11:49:45,208 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45035 2024-11-15T11:49:45,223 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7adf9b3d9d04:0 server-side Connection retries=45 2024-11-15T11:49:45,224 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T11:49:45,224 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T11:49:45,224 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T11:49:45,224 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T11:49:45,224 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T11:49:45,224 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-15T11:49:45,224 INFO 
[Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T11:49:45,224 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46175 2024-11-15T11:49:45,225 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46175 connecting to ZooKeeper ensemble=127.0.0.1:60542 2024-11-15T11:49:45,226 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:49:45,227 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:49:45,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:461750x0, quorum=127.0.0.1:60542, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T11:49:45,240 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46175-0x1013f9df6b30001 connected 2024-11-15T11:49:45,240 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46175-0x1013f9df6b30001, quorum=127.0.0.1:60542, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T11:49:45,241 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-15T11:49:45,241 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-15T11:49:45,242 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46175-0x1013f9df6b30001, quorum=127.0.0.1:60542, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-15T11:49:45,243 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46175-0x1013f9df6b30001, quorum=127.0.0.1:60542, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T11:49:45,243 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46175 2024-11-15T11:49:45,243 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46175 2024-11-15T11:49:45,244 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46175 2024-11-15T11:49:45,244 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46175 2024-11-15T11:49:45,244 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46175 2024-11-15T11:49:45,259 DEBUG [M:0;7adf9b3d9d04:45035 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7adf9b3d9d04:45035 2024-11-15T11:49:45,259 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/7adf9b3d9d04,45035,1731671385082 2024-11-15T11:49:45,265 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46175-0x1013f9df6b30001, quorum=127.0.0.1:60542, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/backup-masters 2024-11-15T11:49:45,265 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45035-0x1013f9df6b30000, quorum=127.0.0.1:60542, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T11:49:45,265 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45035-0x1013f9df6b30000, quorum=127.0.0.1:60542, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/7adf9b3d9d04,45035,1731671385082 2024-11-15T11:49:45,273 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45035-0x1013f9df6b30000, quorum=127.0.0.1:60542, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:49:45,273 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46175-0x1013f9df6b30001, quorum=127.0.0.1:60542, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-15T11:49:45,273 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46175-0x1013f9df6b30001, quorum=127.0.0.1:60542, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:49:45,274 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45035-0x1013f9df6b30000, quorum=127.0.0.1:60542, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-15T11:49:45,274 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7adf9b3d9d04,45035,1731671385082 from backup master directory 2024-11-15T11:49:45,281 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45035-0x1013f9df6b30000, quorum=127.0.0.1:60542, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/7adf9b3d9d04,45035,1731671385082 2024-11-15T11:49:45,282 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46175-0x1013f9df6b30001, quorum=127.0.0.1:60542, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T11:49:45,282 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45035-0x1013f9df6b30000, quorum=127.0.0.1:60542, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T11:49:45,282 WARN [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
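
The ZKWatcher lines above are ordinary ZooKeeper watch notifications: the master and region server set watches on znodes such as /hbase/master and /hbase/backup-masters and re-arm them as NodeCreated, NodeDeleted, and NodeChildrenChanged events arrive. A minimal stand-alone watcher against the same ensemble could look like the sketch below; the connect string 127.0.0.1:60542 and the znode path are taken from the log, while the class name and timeouts are purely illustrative.

import java.util.concurrent.CountDownLatch;

import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Illustrative sketch only: a stand-alone watcher on the backup-masters znode seen above.
public class BackupMastersWatcher {
    public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        // Ensemble address taken from the MiniZooKeeperCluster lines in this log.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:60542", 30_000, event -> {
            if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
                connected.countDown();
            }
            // HBase's ZKWatcher logs these same event types (NodeCreated, NodeDeleted, NodeChildrenChanged).
            System.out.println("Received ZooKeeper Event, type=" + event.getType() + ", path=" + event.getPath());
        });
        connected.await();
        // exists() both checks the znode and re-arms a watch on it, much like the ZKUtil calls above.
        zk.exists("/hbase/backup-masters", true);
        Thread.sleep(60_000);   // keep the session alive long enough to observe a few events
        zk.close();
    }
}
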
2024-11-15T11:49:45,282 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7adf9b3d9d04,45035,1731671385082 2024-11-15T11:49:45,286 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/hbase.id] with ID: c8ce42bb-c3bf-4e22-8e7e-91be3da95adc 2024-11-15T11:49:45,286 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/.tmp/hbase.id 2024-11-15T11:49:45,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36089 is added to blk_1073741826_1002 (size=42) 2024-11-15T11:49:45,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46247 is added to blk_1073741826_1002 (size=42) 2024-11-15T11:49:45,294 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/.tmp/hbase.id]:[hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/hbase.id] 2024-11-15T11:49:45,306 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:49:45,306 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-15T11:49:45,308 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
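
The hbase.id handling above follows the usual write-to-temporary-then-rename pattern on HDFS: the cluster ID is first written under a .tmp location and then moved to its final name, so readers never observe a half-written file. A rough sketch of that pattern with the stock FileSystem API is below; the paths and plain-text contents are illustrative and do not reproduce HBase's actual on-disk format.

import java.nio.charset.StandardCharsets;
import java.util.UUID;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch of the "write to a temporary file, then rename into place" pattern used for hbase.id above.
public class ClusterIdFileSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // fs.defaultFS would point at the test NameNode in a run like the one logged here.
        FileSystem fs = FileSystem.get(conf);

        Path tmp = new Path("/hbase/.tmp/hbase.id");   // illustrative paths
        Path target = new Path("/hbase/hbase.id");
        String clusterId = UUID.randomUUID().toString();

        try (FSDataOutputStream out = fs.create(tmp, true)) {
            out.write(clusterId.getBytes(StandardCharsets.UTF_8));
        }
        // The rename makes the ID file appear at its final location in one step.
        if (!fs.rename(tmp, target)) {
            throw new java.io.IOException("rename failed: " + tmp + " -> " + target);
        }
    }
}
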
2024-11-15T11:49:45,315 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45035-0x1013f9df6b30000, quorum=127.0.0.1:60542, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:49:45,315 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46175-0x1013f9df6b30001, quorum=127.0.0.1:60542, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:49:45,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46247 is added to blk_1073741827_1003 (size=196) 2024-11-15T11:49:45,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36089 is added to blk_1073741827_1003 (size=196) 2024-11-15T11:49:45,329 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T11:49:45,329 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-15T11:49:45,330 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T11:49:45,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36089 is added to blk_1073741828_1004 (size=1189) 2024-11-15T11:49:45,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46247 is added to blk_1073741828_1004 (size=1189) 2024-11-15T11:49:45,339 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/MasterData/data/master/store 2024-11-15T11:49:45,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36089 is added to blk_1073741829_1005 (size=34) 2024-11-15T11:49:45,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46247 is added to blk_1073741829_1005 (size=34) 2024-11-15T11:49:45,345 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:49:45,345 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T11:49:45,345 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:49:45,345 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:49:45,345 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T11:49:45,345 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:49:45,345 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
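
The column-family attributes printed for 'master:store' above (VERSIONS, IN_MEMORY, DATA_BLOCK_ENCODING, BLOOMFILTER, BLOCKSIZE, and so on) correspond to settings exposed by the public descriptor builders. As a rough sketch, a family with the same 'info' values could be declared as below; only values visible in the log are set, the class name is illustrative, and this is not the code HBase itself runs here.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: a column family matching the 'info' settings logged for master:store.
public class MasterStoreDescriptorSketch {
    public static TableDescriptor build() {
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                    // VERSIONS => '3'
            .setInMemory(true)                                    // IN_MEMORY => 'true'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
            .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
            .setBlocksize(8192)                                   // BLOCKSIZE => '8192 B (8KB)'
            .build();
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(info)
            .build();
    }
}
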
2024-11-15T11:49:45,345 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731671385345Disabling compacts and flushes for region at 1731671385345Disabling writes for close at 1731671385345Writing region close event to WAL at 1731671385345Closed at 1731671385345 2024-11-15T11:49:45,346 WARN [master/7adf9b3d9d04:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/MasterData/data/master/store/.initializing 2024-11-15T11:49:45,346 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/MasterData/WALs/7adf9b3d9d04,45035,1731671385082 2024-11-15T11:49:45,348 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7adf9b3d9d04%2C45035%2C1731671385082, suffix=, logDir=hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/MasterData/WALs/7adf9b3d9d04,45035,1731671385082, archiveDir=hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/MasterData/oldWALs, maxLogs=10 2024-11-15T11:49:45,348 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7adf9b3d9d04%2C45035%2C1731671385082.1731671385348 2024-11-15T11:49:45,352 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/MasterData/WALs/7adf9b3d9d04,45035,1731671385082/7adf9b3d9d04%2C45035%2C1731671385082.1731671385348 2024-11-15T11:49:45,353 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43675:43675),(127.0.0.1/127.0.0.1:35833:35833)] 2024-11-15T11:49:45,353 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-15T11:49:45,354 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:49:45,354 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:49:45,354 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:49:45,355 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:49:45,356 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-15T11:49:45,356 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:49:45,356 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:49:45,356 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:49:45,357 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-15T11:49:45,357 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:49:45,357 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:49:45,358 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:49:45,358 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-15T11:49:45,358 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:49:45,359 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:49:45,359 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:49:45,360 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-15T11:49:45,360 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:49:45,360 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:49:45,360 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:49:45,361 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:49:45,361 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:49:45,362 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:49:45,362 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:49:45,363 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-15T11:49:45,364 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:49:45,365 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T11:49:45,366 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=743994, jitterRate=-0.05396370589733124}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-15T11:49:45,366 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731671385354Initializing all the Stores at 1731671385354Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731671385354Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731671385354Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731671385354Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731671385355 (+1 ms)Cleaning up temporary data from old regions at 1731671385362 (+7 ms)Region opened successfully at 1731671385366 (+4 ms) 2024-11-15T11:49:45,367 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-15T11:49:45,370 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c1ffb71, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7adf9b3d9d04/172.17.0.2:0
2024-11-15T11:49:45,371 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating...
2024-11-15T11:49:45,371 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5
2024-11-15T11:49:45,371 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50
2024-11-15T11:49:45,372 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery...
2024-11-15T11:49:45,372 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec
2024-11-15T11:49:45,372 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec
2024-11-15T11:49:45,373 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150
2024-11-15T11:49:45,375 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'.
2024-11-15T11:49:45,376 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45035-0x1013f9df6b30000, quorum=127.0.0.1:60542, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error)
2024-11-15T11:49:45,406 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false
2024-11-15T11:49:45,406 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1
2024-11-15T11:49:45,407 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45035-0x1013f9df6b30000, quorum=127.0.0.1:60542, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error)
2024-11-15T11:49:45,415 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false
2024-11-15T11:49:45,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-15T11:49:45,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-15T11:49:45,415 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited
2024-11-15T11:49:45,416 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45035-0x1013f9df6b30000, quorum=127.0.0.1:60542, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error)
2024-11-15T11:49:45,423 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false
2024-11-15T11:49:45,424 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45035-0x1013f9df6b30000, quorum=127.0.0.1:60542, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error)
2024-11-15T11:49:45,432 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false
2024-11-15T11:49:45,436 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45035-0x1013f9df6b30000, quorum=127.0.0.1:60542, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error)
2024-11-15T11:49:45,440 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false
2024-11-15T11:49:45,449 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45035-0x1013f9df6b30000, quorum=127.0.0.1:60542, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-11-15T11:49:45,449 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46175-0x1013f9df6b30001, quorum=127.0.0.1:60542, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-11-15T11:49:45,449 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45035-0x1013f9df6b30000, quorum=127.0.0.1:60542, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-15T11:49:45,449 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46175-0x1013f9df6b30001, quorum=127.0.0.1:60542, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-15T11:49:45,450 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=7adf9b3d9d04,45035,1731671385082, sessionid=0x1013f9df6b30000, setting cluster-up flag (Was=false)
2024-11-15T11:49:45,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45035-0x1013f9df6b30000, quorum=127.0.0.1:60542, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-15T11:49:45,465 DEBUG [Time-limited test-EventThread {}]
zookeeper.ZKWatcher(609): regionserver:46175-0x1013f9df6b30001, quorum=127.0.0.1:60542, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:49:45,490 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-15T11:49:45,491 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7adf9b3d9d04,45035,1731671385082 2024-11-15T11:49:45,507 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45035-0x1013f9df6b30000, quorum=127.0.0.1:60542, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:49:45,507 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46175-0x1013f9df6b30001, quorum=127.0.0.1:60542, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:49:45,532 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-15T11:49:45,534 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7adf9b3d9d04,45035,1731671385082 2024-11-15T11:49:45,536 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-15T11:49:45,539 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-15T11:49:45,539 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-15T11:49:45,539 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
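
The recurring Close-WAL-Writer-0 warnings in this section report java.lang.reflect.InvocationTargetException: null with java.io.IOException: Filesystem closed as the cause. The lease-recovery helper reaches isFileClosed through reflection (visible in the Method.invoke frames), and once the test's DFS client has been closed the underlying call fails in DFSClient.checkOpen, so the real error only surfaces as the wrapped cause. The toy example below reproduces just that wrapping shape; it is not HBase code, and the stand-in class and path string are invented for illustration.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

// Illustrative only: shows why the log prints "InvocationTargetException: null"
// with the real failure ("Filesystem closed") attached as the cause.
public class ReflectiveCallShape {

    // Stand-in for calling DistributedFileSystem.isFileClosed after the client was closed.
    public static class ClosedFs {
        public boolean isFileClosed(String path) throws IOException {
            throw new IOException("Filesystem closed");
        }
    }

    public static void main(String[] args) throws Exception {
        Method m = ClosedFs.class.getMethod("isFileClosed", String.class);
        try {
            m.invoke(new ClosedFs(), "/user/jenkins/test-data/.../some.wal");
        } catch (InvocationTargetException e) {
            // The wrapper has no message of its own, hence "InvocationTargetException: null" in the log,
            // while the underlying IOException rides along as the cause.
            System.out.println("wrapper message: " + e.getMessage());
            System.out.println("real cause:      " + e.getCause());
        }
    }
}
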
2024-11-15T11:49:45,540 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7adf9b3d9d04,45035,1731671385082 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-15T11:49:45,542 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7adf9b3d9d04:0, corePoolSize=5, maxPoolSize=5 2024-11-15T11:49:45,542 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7adf9b3d9d04:0, corePoolSize=5, maxPoolSize=5 2024-11-15T11:49:45,542 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7adf9b3d9d04:0, corePoolSize=5, maxPoolSize=5 2024-11-15T11:49:45,542 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7adf9b3d9d04:0, corePoolSize=5, maxPoolSize=5 2024-11-15T11:49:45,542 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7adf9b3d9d04:0, corePoolSize=10, maxPoolSize=10 2024-11-15T11:49:45,542 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:49:45,542 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/7adf9b3d9d04:0, corePoolSize=2, maxPoolSize=2 2024-11-15T11:49:45,542 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:49:45,544 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T11:49:45,545 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-15T11:49:45,546 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:49:45,546 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731671415546 2024-11-15T11:49:45,546 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', 
BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-15T11:49:45,547 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-15T11:49:45,547 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-15T11:49:45,547 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-15T11:49:45,547 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-15T11:49:45,547 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-15T11:49:45,547 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-15T11:49:45,548 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
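
The hbase:meta descriptor printed above (families info, ns, rep_barrier and table, each with ROWCOL bloom filters, ROW_INDEX_V1 encoding, IN_MEMORY=true and an 8 KB or 64 KB block size) is generated internally by FSTableDescriptors. As a rough sketch, an equivalent column family can be declared through the public client builders like this; the table name is made up and the exact setter names should be checked against the HBase version in use.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeDescriptorSketch {
      public static void main(String[] args) {
        // Mirrors the 'info' family settings shown in the log; "example_table" is hypothetical.
        TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("example_table"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setBloomFilterType(BloomType.ROWCOL)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setInMemory(true)
                .setBlocksize(8192)
                .build())
            .build();
        System.out.println(td);
      }
    }
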
2024-11-15T11:49:45,548 INFO [RS:0;7adf9b3d9d04:46175 {}] regionserver.HRegionServer(746): ClusterId : c8ce42bb-c3bf-4e22-8e7e-91be3da95adc 2024-11-15T11:49:45,548 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-15T11:49:45,548 DEBUG [RS:0;7adf9b3d9d04:46175 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-15T11:49:45,548 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-15T11:49:45,548 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-15T11:49:45,549 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-15T11:49:45,549 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-15T11:49:45,549 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7adf9b3d9d04:0:becomeActiveMaster-HFileCleaner.large.0-1731671385549,5,FailOnTimeoutGroup] 2024-11-15T11:49:45,549 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/7adf9b3d9d04:0:becomeActiveMaster-HFileCleaner.small.0-1731671385549,5,FailOnTimeoutGroup] 2024-11-15T11:49:45,549 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-15T11:49:45,549 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-15T11:49:45,549 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-15T11:49:45,549 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
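
The ChoreService entries above are fixed-period background tasks: LogsCleaner and HFileCleaner every 600000 ms, ReplicationBarrierCleaner every 43200000 ms, SnapshotCleaner every 1800000 ms. The scheduling pattern in plain JDK terms looks like the sketch below; this is a loose analog, not HBase's actual ChoreService API, and the task body is a stand-in.

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreAnalog {
      public static void main(String[] args) {
        ScheduledExecutorService chorePool = Executors.newScheduledThreadPool(1);
        // Period matches the LogsCleaner entry above (600000 ms); the Runnable is illustrative.
        chorePool.scheduleAtFixedRate(
            () -> System.out.println("scanning oldWALs for expired files"),
            0, 600_000, TimeUnit.MILLISECONDS);
      }
    }
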
2024-11-15T11:49:45,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46247 is added to blk_1073741831_1007 (size=1321) 2024-11-15T11:49:45,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36089 is added to blk_1073741831_1007 (size=1321) 2024-11-15T11:49:45,554 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-15T11:49:45,554 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102 2024-11-15T11:49:45,557 DEBUG [RS:0;7adf9b3d9d04:46175 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-15T11:49:45,557 DEBUG [RS:0;7adf9b3d9d04:46175 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-15T11:49:45,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46247 is added to blk_1073741832_1008 (size=32) 2024-11-15T11:49:45,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36089 is added to blk_1073741832_1008 (size=32) 2024-11-15T11:49:45,560 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:49:45,561 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for 
column family info of region 1588230740 2024-11-15T11:49:45,562 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T11:49:45,562 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:49:45,562 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:49:45,563 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T11:49:45,563 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T11:49:45,563 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:49:45,564 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:49:45,564 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T11:49:45,565 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: 
max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T11:49:45,565 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:49:45,565 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:49:45,565 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T11:49:45,566 DEBUG [RS:0;7adf9b3d9d04:46175 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-15T11:49:45,566 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T11:49:45,566 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:49:45,566 DEBUG [RS:0;7adf9b3d9d04:46175 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@65b0342, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7adf9b3d9d04/172.17.0.2:0 2024-11-15T11:49:45,566 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:49:45,566 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T11:49:45,567 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/data/hbase/meta/1588230740 2024-11-15T11:49:45,567 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/data/hbase/meta/1588230740 2024-11-15T11:49:45,568 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 
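
The CompactionConfiguration dump repeated for every store above (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0) corresponds to a handful of well-known tuning keys. A hedged sketch of setting them is shown below; the values simply restate the defaults printed in the log, and the key names should be verified against the documentation for the release in use.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Values mirror the CompactionConfiguration line logged for region 1588230740.
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        System.out.println("compaction ratio = " + conf.getFloat("hbase.hstore.compaction.ratio", 1.2f));
      }
    }
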
2024-11-15T11:49:45,568 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T11:49:45,568 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-15T11:49:45,569 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T11:49:45,571 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T11:49:45,571 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=737687, jitterRate=-0.06198342144489288}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T11:49:45,571 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731671385560Initializing all the Stores at 1731671385561 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731671385561Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731671385561Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731671385561Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731671385561Cleaning up temporary data from old regions at 1731671385568 (+7 ms)Region opened successfully at 1731671385571 (+3 ms) 2024-11-15T11:49:45,571 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T11:49:45,571 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T11:49:45,571 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T11:49:45,572 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T11:49:45,572 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 
2024-11-15T11:49:45,572 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T11:49:45,572 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731671385571Disabling compacts and flushes for region at 1731671385571Disabling writes for close at 1731671385572 (+1 ms)Writing region close event to WAL at 1731671385572Closed at 1731671385572 2024-11-15T11:49:45,573 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T11:49:45,573 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-15T11:49:45,573 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-15T11:49:45,574 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T11:49:45,574 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-15T11:49:45,581 DEBUG [RS:0;7adf9b3d9d04:46175 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;7adf9b3d9d04:46175 2024-11-15T11:49:45,581 INFO [RS:0;7adf9b3d9d04:46175 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-15T11:49:45,581 INFO [RS:0;7adf9b3d9d04:46175 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-15T11:49:45,581 DEBUG [RS:0;7adf9b3d9d04:46175 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-15T11:49:45,582 INFO [RS:0;7adf9b3d9d04:46175 {}] regionserver.HRegionServer(2659): reportForDuty to master=7adf9b3d9d04,45035,1731671385082 with port=46175, startcode=1731671385223 2024-11-15T11:49:45,582 DEBUG [RS:0;7adf9b3d9d04:46175 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-15T11:49:45,583 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52691, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-15T11:49:45,584 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45035 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 7adf9b3d9d04,46175,1731671385223 2024-11-15T11:49:45,584 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45035 {}] master.ServerManager(517): Registering regionserver=7adf9b3d9d04,46175,1731671385223 2024-11-15T11:49:45,585 DEBUG [RS:0;7adf9b3d9d04:46175 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102 2024-11-15T11:49:45,585 DEBUG [RS:0;7adf9b3d9d04:46175 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46381 2024-11-15T11:49:45,585 DEBUG [RS:0;7adf9b3d9d04:46175 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-15T11:49:45,590 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45035-0x1013f9df6b30000, quorum=127.0.0.1:60542, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T11:49:45,590 DEBUG [RS:0;7adf9b3d9d04:46175 {}] zookeeper.ZKUtil(111): regionserver:46175-0x1013f9df6b30001, quorum=127.0.0.1:60542, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7adf9b3d9d04,46175,1731671385223 2024-11-15T11:49:45,590 WARN [RS:0;7adf9b3d9d04:46175 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-15T11:49:45,590 INFO [RS:0;7adf9b3d9d04:46175 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T11:49:45,590 DEBUG [RS:0;7adf9b3d9d04:46175 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/WALs/7adf9b3d9d04,46175,1731671385223 2024-11-15T11:49:45,591 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7adf9b3d9d04,46175,1731671385223] 2024-11-15T11:49:45,594 INFO [RS:0;7adf9b3d9d04:46175 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-15T11:49:45,596 INFO [RS:0;7adf9b3d9d04:46175 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-15T11:49:45,597 INFO [RS:0;7adf9b3d9d04:46175 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-15T11:49:45,597 INFO [RS:0;7adf9b3d9d04:46175 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
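
The MemStoreFlusher line above (globalMemStoreLimit=880 M with a low-water mark of 836 M, i.e. 95% of the limit) is derived from the region server heap and two configuration fractions. A sketch of the relevant keys follows; the names and defaults are believed current but should be double-checked for the version in use.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Fraction of the RS heap reserved for all memstores (880 M in this run).
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        // Low-water mark as a fraction of the limit; 0.95 * 880 M = 836 M, matching the log.
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
      }
    }
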
2024-11-15T11:49:45,597 INFO [RS:0;7adf9b3d9d04:46175 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-15T11:49:45,598 INFO [RS:0;7adf9b3d9d04:46175 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-15T11:49:45,598 INFO [RS:0;7adf9b3d9d04:46175 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-15T11:49:45,599 DEBUG [RS:0;7adf9b3d9d04:46175 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:49:45,599 DEBUG [RS:0;7adf9b3d9d04:46175 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:49:45,599 DEBUG [RS:0;7adf9b3d9d04:46175 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:49:45,599 DEBUG [RS:0;7adf9b3d9d04:46175 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:49:45,599 DEBUG [RS:0;7adf9b3d9d04:46175 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:49:45,599 DEBUG [RS:0;7adf9b3d9d04:46175 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7adf9b3d9d04:0, corePoolSize=2, maxPoolSize=2 2024-11-15T11:49:45,599 DEBUG [RS:0;7adf9b3d9d04:46175 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:49:45,599 DEBUG [RS:0;7adf9b3d9d04:46175 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:49:45,599 DEBUG [RS:0;7adf9b3d9d04:46175 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:49:45,599 DEBUG [RS:0;7adf9b3d9d04:46175 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:49:45,599 DEBUG [RS:0;7adf9b3d9d04:46175 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:49:45,599 DEBUG [RS:0;7adf9b3d9d04:46175 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7adf9b3d9d04:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:49:45,599 DEBUG [RS:0;7adf9b3d9d04:46175 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7adf9b3d9d04:0, corePoolSize=3, maxPoolSize=3 2024-11-15T11:49:45,599 DEBUG [RS:0;7adf9b3d9d04:46175 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7adf9b3d9d04:0, corePoolSize=3, maxPoolSize=3 2024-11-15T11:49:45,600 INFO [RS:0;7adf9b3d9d04:46175 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
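
Each "Starting executor service ... corePoolSize=N, maxPoolSize=N" entry above is a dedicated fixed-size pool per event type. In plain JDK terms the shape is a ThreadPoolExecutor whose core and maximum sizes match; the sketch below mirrors the single-threaded RS_OPEN_REGION pool and uses a placeholder task.

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class FixedPoolSketch {
      public static void main(String[] args) {
        // corePoolSize == maxPoolSize == 1, as logged for RS_OPEN_REGION above.
        ThreadPoolExecutor openRegionPool = new ThreadPoolExecutor(
            1, 1, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        openRegionPool.execute(() -> System.out.println("open-region task placeholder"));
        openRegionPool.shutdown();
      }
    }
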
2024-11-15T11:49:45,600 INFO [RS:0;7adf9b3d9d04:46175 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T11:49:45,600 INFO [RS:0;7adf9b3d9d04:46175 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T11:49:45,600 INFO [RS:0;7adf9b3d9d04:46175 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-15T11:49:45,600 INFO [RS:0;7adf9b3d9d04:46175 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-15T11:49:45,600 INFO [RS:0;7adf9b3d9d04:46175 {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,46175,1731671385223-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T11:49:45,617 INFO [RS:0;7adf9b3d9d04:46175 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-15T11:49:45,617 INFO [RS:0;7adf9b3d9d04:46175 {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,46175,1731671385223-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T11:49:45,617 INFO [RS:0;7adf9b3d9d04:46175 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T11:49:45,617 INFO [RS:0;7adf9b3d9d04:46175 {}] regionserver.Replication(171): 7adf9b3d9d04,46175,1731671385223 started 2024-11-15T11:49:45,629 INFO [RS:0;7adf9b3d9d04:46175 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T11:49:45,629 INFO [RS:0;7adf9b3d9d04:46175 {}] regionserver.HRegionServer(1482): Serving as 7adf9b3d9d04,46175,1731671385223, RpcServer on 7adf9b3d9d04/172.17.0.2:46175, sessionid=0x1013f9df6b30001 2024-11-15T11:49:45,629 DEBUG [RS:0;7adf9b3d9d04:46175 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-15T11:49:45,629 DEBUG [RS:0;7adf9b3d9d04:46175 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7adf9b3d9d04,46175,1731671385223 2024-11-15T11:49:45,629 DEBUG [RS:0;7adf9b3d9d04:46175 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7adf9b3d9d04,46175,1731671385223' 2024-11-15T11:49:45,629 DEBUG [RS:0;7adf9b3d9d04:46175 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-15T11:49:45,630 DEBUG [RS:0;7adf9b3d9d04:46175 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-15T11:49:45,630 DEBUG [RS:0;7adf9b3d9d04:46175 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-15T11:49:45,630 DEBUG [RS:0;7adf9b3d9d04:46175 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-15T11:49:45,630 DEBUG [RS:0;7adf9b3d9d04:46175 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7adf9b3d9d04,46175,1731671385223 2024-11-15T11:49:45,630 DEBUG [RS:0;7adf9b3d9d04:46175 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7adf9b3d9d04,46175,1731671385223' 2024-11-15T11:49:45,630 DEBUG [RS:0;7adf9b3d9d04:46175 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-15T11:49:45,630 DEBUG 
[RS:0;7adf9b3d9d04:46175 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-15T11:49:45,631 DEBUG [RS:0;7adf9b3d9d04:46175 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-15T11:49:45,631 INFO [RS:0;7adf9b3d9d04:46175 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-15T11:49:45,631 INFO [RS:0;7adf9b3d9d04:46175 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-15T11:49:45,725 WARN [7adf9b3d9d04:45035 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-15T11:49:45,734 INFO [RS:0;7adf9b3d9d04:46175 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7adf9b3d9d04%2C46175%2C1731671385223, suffix=, logDir=hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/WALs/7adf9b3d9d04,46175,1731671385223, archiveDir=hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/oldWALs, maxLogs=32 2024-11-15T11:49:45,735 INFO [RS:0;7adf9b3d9d04:46175 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7adf9b3d9d04%2C46175%2C1731671385223.1731671385735 2024-11-15T11:49:45,741 INFO [RS:0;7adf9b3d9d04:46175 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/WALs/7adf9b3d9d04,46175,1731671385223/7adf9b3d9d04%2C46175%2C1731671385223.1731671385735 2024-11-15T11:49:45,742 DEBUG [RS:0;7adf9b3d9d04:46175 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43675:43675),(127.0.0.1/127.0.0.1:35833:35833)] 2024-11-15T11:49:45,975 DEBUG [7adf9b3d9d04:45035 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-15T11:49:45,976 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7adf9b3d9d04,46175,1731671385223 2024-11-15T11:49:45,980 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7adf9b3d9d04,46175,1731671385223, state=OPENING 2024-11-15T11:49:46,048 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-15T11:49:46,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46175-0x1013f9df6b30001, quorum=127.0.0.1:60542, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:49:46,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45035-0x1013f9df6b30000, quorum=127.0.0.1:60542, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:49:46,058 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T11:49:46,058 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T11:49:46,058 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T11:49:46,058 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=7adf9b3d9d04,46175,1731671385223}] 2024-11-15T11:49:46,212 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-15T11:49:46,213 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33587, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-15T11:49:46,216 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-15T11:49:46,217 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T11:49:46,218 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7adf9b3d9d04%2C46175%2C1731671385223.meta, suffix=.meta, logDir=hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/WALs/7adf9b3d9d04,46175,1731671385223, archiveDir=hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/oldWALs, maxLogs=32 2024-11-15T11:49:46,219 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 7adf9b3d9d04%2C46175%2C1731671385223.meta.1731671386219.meta 2024-11-15T11:49:46,225 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/WALs/7adf9b3d9d04,46175,1731671385223/7adf9b3d9d04%2C46175%2C1731671385223.meta.1731671386219.meta 2024-11-15T11:49:46,227 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43675:43675),(127.0.0.1/127.0.0.1:35833:35833)] 2024-11-15T11:49:46,234 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-15T11:49:46,235 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-15T11:49:46,235 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-15T11:49:46,235 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
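
The WAL configuration printed above (blocksize=256 MB, rollsize=128 MB, maxLogs=32) follows from the block size, a roll multiplier of 0.5 and the max-logs cap. The usual knobs are sketched below; this is hedged, since exact key names and defaults can differ between releases.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalRollSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // 256 MB WAL blocks
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);          // roll at ~128 MB
        conf.setInt("hbase.regionserver.maxlogs", 32);                         // matches maxLogs=32 above
      }
    }
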
2024-11-15T11:49:46,235 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-15T11:49:46,235 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:49:46,235 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-15T11:49:46,235 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-15T11:49:46,236 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T11:49:46,237 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T11:49:46,237 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:49:46,237 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:49:46,237 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T11:49:46,238 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T11:49:46,238 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:49:46,238 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:49:46,238 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T11:49:46,239 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T11:49:46,239 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:49:46,239 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:49:46,239 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T11:49:46,240 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T11:49:46,240 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:49:46,240 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-15T11:49:46,240 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T11:49:46,241 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/data/hbase/meta/1588230740 2024-11-15T11:49:46,242 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/data/hbase/meta/1588230740 2024-11-15T11:49:46,243 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T11:49:46,243 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T11:49:46,243 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-15T11:49:46,244 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T11:49:46,245 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=753424, jitterRate=-0.04197239875793457}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T11:49:46,245 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-15T11:49:46,245 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731671386235Writing region info on filesystem at 1731671386235Initializing all the Stores at 1731671386236 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731671386236Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731671386236Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731671386236Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731671386236Cleaning up temporary data from old regions at 1731671386243 (+7 ms)Running coprocessor post-open hooks at 1731671386245 (+2 ms)Region opened successfully at 1731671386245 2024-11-15T11:49:46,246 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731671386212 2024-11-15T11:49:46,248 DEBUG [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-15T11:49:46,248 INFO [RS_OPEN_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-15T11:49:46,249 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=7adf9b3d9d04,46175,1731671385223 2024-11-15T11:49:46,249 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7adf9b3d9d04,46175,1731671385223, state=OPEN 2024-11-15T11:49:46,278 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45035-0x1013f9df6b30000, quorum=127.0.0.1:60542, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T11:49:46,278 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46175-0x1013f9df6b30001, quorum=127.0.0.1:60542, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T11:49:46,279 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=7adf9b3d9d04,46175,1731671385223 2024-11-15T11:49:46,279 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T11:49:46,279 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T11:49:46,283 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-15T11:49:46,283 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=7adf9b3d9d04,46175,1731671385223 in 221 msec 2024-11-15T11:49:46,288 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-15T11:49:46,288 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 710 msec 2024-11-15T11:49:46,290 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T11:49:46,290 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-15T11:49:46,292 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:49:46,292 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7adf9b3d9d04,46175,1731671385223, seqNum=-1] 2024-11-15T11:49:46,293 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:49:46,294 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45447, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T11:49:46,301 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 761 msec 2024-11-15T11:49:46,301 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731671386301, completionTime=-1 2024-11-15T11:49:46,301 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-15T11:49:46,301 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-15T11:49:46,304 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-15T11:49:46,304 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731671446304 2024-11-15T11:49:46,304 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731671506304 2024-11-15T11:49:46,304 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-15T11:49:46,304 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,45035,1731671385082-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T11:49:46,304 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,45035,1731671385082-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T11:49:46,304 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,45035,1731671385082-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T11:49:46,304 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7adf9b3d9d04:45035, period=300000, unit=MILLISECONDS is enabled. 
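
InitMetaProcedure's final step above creates the {NAME => 'default'} and {NAME => 'hbase'} namespaces. User namespaces are created the same way through the Admin API; in the sketch below the connection settings and namespace name are illustrative only.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CreateNamespaceSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // "example_ns" is a made-up name; the built-in default/hbase namespaces already exist.
          admin.createNamespace(NamespaceDescriptor.create("example_ns").build());
        }
      }
    }
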
2024-11-15T11:49:46,305 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-15T11:49:46,305 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-15T11:49:46,307 DEBUG [master/7adf9b3d9d04:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-15T11:49:46,309 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.027sec 2024-11-15T11:49:46,309 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-15T11:49:46,309 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-15T11:49:46,309 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-15T11:49:46,309 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-15T11:49:46,309 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-15T11:49:46,309 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,45035,1731671385082-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T11:49:46,309 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,45035,1731671385082-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-15T11:49:46,311 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-15T11:49:46,311 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-15T11:49:46,311 INFO [master/7adf9b3d9d04:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7adf9b3d9d04,45035,1731671385082-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
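The master-initialization entries above note that quota support and slow/large request logging to hbase:slowlog are disabled in this run. A minimal sketch of the switches involved follows; hbase.quota.enabled is the standard quota flag, while the slowlog key name is given as a best-effort assumption rather than a verified constant.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

class MasterFeatureFlagsSketch {
  static Configuration withQuotasAndSlowLog() {
    Configuration conf = HBaseConfiguration.create();
    // Flips the "Quota support disabled" message seen above.
    conf.setBoolean("hbase.quota.enabled", true);
    // Assumed key name for persisting slow/large request logs to hbase:slowlog.
    conf.setBoolean("hbase.regionserver.slowlog.systable.enabled", true);
    return conf;
  }
}
```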
2024-11-15T11:49:46,348 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4457fb1e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:49:46,348 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 7adf9b3d9d04,45035,-1 for getting cluster id 2024-11-15T11:49:46,348 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T11:49:46,349 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c8ce42bb-c3bf-4e22-8e7e-91be3da95adc' 2024-11-15T11:49:46,350 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T11:49:46,350 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c8ce42bb-c3bf-4e22-8e7e-91be3da95adc" 2024-11-15T11:49:46,350 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6cfe972c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:49:46,350 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [7adf9b3d9d04,45035,-1] 2024-11-15T11:49:46,350 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T11:49:46,350 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:49:46,351 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45872, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T11:49:46,352 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@ce49b8a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:49:46,352 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:49:46,353 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7adf9b3d9d04,46175,1731671385223, seqNum=-1] 2024-11-15T11:49:46,353 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:49:46,354 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39992, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T11:49:46,356 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=7adf9b3d9d04,45035,1731671385082 2024-11-15T11:49:46,356 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:49:46,359 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-15T11:49:46,359 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T11:49:46,361 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/WALs/test.com,8080,1, archiveDir=hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/oldWALs, maxLogs=32 2024-11-15T11:49:46,361 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731671386361 2024-11-15T11:49:46,367 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/WALs/test.com,8080,1/test.com%2C8080%2C1.1731671386361 2024-11-15T11:49:46,370 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35833:35833),(127.0.0.1/127.0.0.1:43675:43675)] 2024-11-15T11:49:46,371 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731671386371 2024-11-15T11:49:46,379 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:49:46,379 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:49:46,379 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:49:46,379 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:49:46,379 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:49:46,379 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/WALs/test.com,8080,1/test.com%2C8080%2C1.1731671386361 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/WALs/test.com,8080,1/test.com%2C8080%2C1.1731671386371 2024-11-15T11:49:46,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36089 is added to blk_1073741835_1011 (size=93) 2024-11-15T11:49:46,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46247 is added to blk_1073741835_1011 (size=93) 2024-11-15T11:49:46,382 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43675:43675),(127.0.0.1/127.0.0.1:35833:35833)] 2024-11-15T11:49:46,382 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/WALs/test.com,8080,1/test.com%2C8080%2C1.1731671386361 is not closed yet, will try archiving it next time 2024-11-15T11:49:46,385 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:49:46,385 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:49:46,385 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/WALs/test.com,8080,1/test.com%2C8080%2C1.1731671386361 to hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/oldWALs/test.com%2C8080%2C1.1731671386361 2024-11-15T11:49:46,385 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): 
interrupted 2024-11-15T11:49:46,385 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:49:46,385 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:49:46,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46247 is added to blk_1073741836_1012 (size=93) 2024-11-15T11:49:46,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36089 is added to blk_1073741836_1012 (size=93) 2024-11-15T11:49:46,388 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/oldWALs 2024-11-15T11:49:46,388 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1731671386371) 2024-11-15T11:49:46,389 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-15T11:49:46,389 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-15T11:49:46,389 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at 
org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T11:49:46,389 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:49:46,389 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:49:46,389 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T11:49:46,389 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-15T11:49:46,389 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1995188693, stopped=false 2024-11-15T11:49:46,389 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=7adf9b3d9d04,45035,1731671385082 2024-11-15T11:49:46,398 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45035-0x1013f9df6b30000, quorum=127.0.0.1:60542, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T11:49:46,398 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46175-0x1013f9df6b30001, quorum=127.0.0.1:60542, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T11:49:46,398 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45035-0x1013f9df6b30000, quorum=127.0.0.1:60542, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:49:46,398 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46175-0x1013f9df6b30001, quorum=127.0.0.1:60542, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:49:46,398 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T11:49:46,398 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
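The WAL entries a few lines above (WALFactory instantiating FSHLogProvider, "WAL configuration: blocksize=256 MB, rollsize=128 MB, ..., maxLogs=32", then the roll of test.com%2C8080%2C1 with entries=0) match the stock FSHLog settings. A sketch of the configuration keys behind those numbers, assuming the standard key names, with rollsize derived as blocksize times the roll multiplier:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

class WalRollConfigSketch {
  static Configuration walConfig() {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.wal.provider", "filesystem");                  // FSHLogProvider, as logged above
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L << 20); // 256 MB WAL block size
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);  // rollsize = 128 MB
    conf.setInt("hbase.regionserver.maxlogs", 32);                 // maxLogs shown in the log
    return conf;
  }
}
```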
2024-11-15T11:49:46,398 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T11:49:46,399 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:49:46,399 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:46175-0x1013f9df6b30001, quorum=127.0.0.1:60542, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T11:49:46,399 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45035-0x1013f9df6b30000, quorum=127.0.0.1:60542, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T11:49:46,399 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '7adf9b3d9d04,46175,1731671385223' ***** 2024-11-15T11:49:46,399 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-15T11:49:46,399 INFO [RS:0;7adf9b3d9d04:46175 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-15T11:49:46,399 INFO [RS:0;7adf9b3d9d04:46175 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-15T11:49:46,399 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-15T11:49:46,399 INFO [RS:0;7adf9b3d9d04:46175 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-15T11:49:46,399 INFO [RS:0;7adf9b3d9d04:46175 {}] regionserver.HRegionServer(959): stopping server 7adf9b3d9d04,46175,1731671385223 2024-11-15T11:49:46,399 INFO [RS:0;7adf9b3d9d04:46175 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T11:49:46,399 INFO [RS:0;7adf9b3d9d04:46175 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;7adf9b3d9d04:46175. 2024-11-15T11:49:46,399 DEBUG [RS:0;7adf9b3d9d04:46175 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T11:49:46,399 DEBUG [RS:0;7adf9b3d9d04:46175 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:49:46,399 INFO [RS:0;7adf9b3d9d04:46175 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-15T11:49:46,399 INFO [RS:0;7adf9b3d9d04:46175 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-15T11:49:46,399 INFO [RS:0;7adf9b3d9d04:46175 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
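The AsyncConnectionImpl call stacks above all bottom out in AbstractTestLogRolling.tearDown calling HBaseTestingUtil.shutdownMiniCluster, which in turn closes the shared connection and shuts down the HBase, DFS and ZooKeeper miniclusters. A sketch of that test lifecycle follows; the class name is a placeholder and the no-argument startMiniCluster() call is an assumption, only shutdownMiniCluster() is confirmed by the stack traces.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;

public class MiniClusterLifecycleSketch {
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @BeforeClass
  public static void setUpCluster() throws Exception {
    TEST_UTIL.startMiniCluster(); // one master plus one region server, as in this run
  }

  @AfterClass
  public static void tearDownCluster() throws Exception {
    TEST_UTIL.shutdownMiniCluster(); // produces the "Shutting down minicluster" entries above
  }
}
```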
2024-11-15T11:49:46,399 INFO [RS:0;7adf9b3d9d04:46175 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-15T11:49:46,400 INFO [RS:0;7adf9b3d9d04:46175 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-15T11:49:46,400 DEBUG [RS:0;7adf9b3d9d04:46175 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-15T11:49:46,400 DEBUG [RS:0;7adf9b3d9d04:46175 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-15T11:49:46,400 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T11:49:46,400 INFO [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T11:49:46,400 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T11:49:46,400 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T11:49:46,400 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T11:49:46,400 INFO [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-15T11:49:46,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,46507,1731671195095/7adf9b3d9d04%2C46507%2C1731671195095.meta.1731671196199.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T11:49:46,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40139/user/jenkins/test-data/3dbac7f8-21cd-12b9-48a5-06b54bc952b2/WALs/7adf9b3d9d04,39827,1731671196494/7adf9b3d9d04%2C39827%2C1731671196494.1731671196826 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T11:49:46,423 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/data/hbase/meta/1588230740/.tmp/ns/b7cb297eaf7342adadef565802435d71 is 43, key is default/ns:d/1731671386295/Put/seqid=0 2024-11-15T11:49:46,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36089 is added to blk_1073741837_1013 (size=5153) 2024-11-15T11:49:46,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46247 is added to blk_1073741837_1013 (size=5153) 2024-11-15T11:49:46,427 INFO [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/data/hbase/meta/1588230740/.tmp/ns/b7cb297eaf7342adadef565802435d71 2024-11-15T11:49:46,433 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/data/hbase/meta/1588230740/.tmp/ns/b7cb297eaf7342adadef565802435d71 as hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/data/hbase/meta/1588230740/ns/b7cb297eaf7342adadef565802435d71 2024-11-15T11:49:46,438 INFO [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/data/hbase/meta/1588230740/ns/b7cb297eaf7342adadef565802435d71, entries=2, sequenceid=6, filesize=5.0 K 2024-11-15T11:49:46,439 INFO [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 39ms, sequenceid=6, compaction requested=false 2024-11-15T11:49:46,443 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-15T11:49:46,444 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T11:49:46,444 INFO [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T11:49:46,444 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731671386400Running coprocessor pre-close hooks at 1731671386400Disabling compacts and flushes for region at 1731671386400Disabling writes for close at 1731671386400Obtaining lock to block concurrent updates at 1731671386400Preparing flush snapshotting stores in 1588230740 at 1731671386400Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731671386400Flushing stores of hbase:meta,,1.1588230740 at 
1731671386401 (+1 ms)Flushing 1588230740/ns: creating writer at 1731671386401Flushing 1588230740/ns: appending metadata at 1731671386422 (+21 ms)Flushing 1588230740/ns: closing flushed file at 1731671386422Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@16954c68: reopening flushed file at 1731671386432 (+10 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 39ms, sequenceid=6, compaction requested=false at 1731671386439 (+7 ms)Writing region close event to WAL at 1731671386440 (+1 ms)Running coprocessor post-close hooks at 1731671386444 (+4 ms)Closed at 1731671386444 2024-11-15T11:49:46,444 DEBUG [RS_CLOSE_META-regionserver/7adf9b3d9d04:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-15T11:49:46,600 INFO [RS:0;7adf9b3d9d04:46175 {}] regionserver.HRegionServer(976): stopping server 7adf9b3d9d04,46175,1731671385223; all regions closed. 2024-11-15T11:49:46,601 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:49:46,601 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:49:46,601 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:49:46,601 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:49:46,602 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:49:46,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46247 is added to blk_1073741834_1010 (size=1152) 2024-11-15T11:49:46,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36089 is added to blk_1073741834_1010 (size=1152) 2024-11-15T11:49:46,609 DEBUG [RS:0;7adf9b3d9d04:46175 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/oldWALs 2024-11-15T11:49:46,609 INFO [RS:0;7adf9b3d9d04:46175 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7adf9b3d9d04%2C46175%2C1731671385223.meta:.meta(num 1731671386219) 2024-11-15T11:49:46,609 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:49:46,610 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:49:46,610 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:49:46,610 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:49:46,610 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:49:46,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46247 is added to blk_1073741833_1009 (size=93) 2024-11-15T11:49:46,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36089 is added to blk_1073741833_1009 (size=93) 2024-11-15T11:49:46,614 DEBUG [RS:0;7adf9b3d9d04:46175 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/oldWALs 2024-11-15T11:49:46,615 INFO [RS:0;7adf9b3d9d04:46175 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7adf9b3d9d04%2C46175%2C1731671385223:(num 1731671385735) 2024-11-15T11:49:46,615 DEBUG [RS:0;7adf9b3d9d04:46175 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:49:46,615 INFO [RS:0;7adf9b3d9d04:46175 {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T11:49:46,615 INFO [RS:0;7adf9b3d9d04:46175 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T11:49:46,615 INFO [RS:0;7adf9b3d9d04:46175 {}] 
hbase.ChoreService(370): Chore service for: regionserver/7adf9b3d9d04:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-15T11:49:46,615 INFO [RS:0;7adf9b3d9d04:46175 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T11:49:46,615 INFO [regionserver/7adf9b3d9d04:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-15T11:49:46,615 INFO [RS:0;7adf9b3d9d04:46175 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46175 2024-11-15T11:49:46,631 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46175-0x1013f9df6b30001, quorum=127.0.0.1:60542, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7adf9b3d9d04,46175,1731671385223 2024-11-15T11:49:46,631 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45035-0x1013f9df6b30000, quorum=127.0.0.1:60542, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T11:49:46,631 INFO [RS:0;7adf9b3d9d04:46175 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T11:49:46,640 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7adf9b3d9d04,46175,1731671385223] 2024-11-15T11:49:46,648 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/7adf9b3d9d04,46175,1731671385223 already deleted, retry=false 2024-11-15T11:49:46,648 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 7adf9b3d9d04,46175,1731671385223 expired; onlineServers=0 2024-11-15T11:49:46,648 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '7adf9b3d9d04,45035,1731671385082' ***** 2024-11-15T11:49:46,648 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-15T11:49:46,648 INFO [M:0;7adf9b3d9d04:45035 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T11:49:46,648 INFO [M:0;7adf9b3d9d04:45035 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T11:49:46,648 DEBUG [M:0;7adf9b3d9d04:45035 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-15T11:49:46,648 DEBUG [M:0;7adf9b3d9d04:45035 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-15T11:49:46,648 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
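The RecoverLeaseFSUtils warnings a few entries above ("Failed invocation ... Caused by: java.io.IOException: Filesystem closed") come from probing old WAL files with the HDFS lease-recovery API after the underlying DFSClient has already been closed. A stand-alone sketch of that probe under the public HDFS API, with the WAL path supplied by the caller as a placeholder:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

class WalLeaseRecoverySketch {
  static void recover(Configuration conf, Path wal) throws Exception {
    FileSystem fs = wal.getFileSystem(conf);
    if (!(fs instanceof DistributedFileSystem)) {
      return; // lease recovery only applies to HDFS
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    boolean recovered = dfs.recoverLease(wal);     // ask the NameNode to reclaim the lease
    while (!recovered && !dfs.isFileClosed(wal)) { // the probe that fails above once the client is closed
      Thread.sleep(1000L);
      recovered = dfs.recoverLease(wal);
    }
  }
}
```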
2024-11-15T11:49:46,648 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster-HFileCleaner.large.0-1731671385549 {}] cleaner.HFileCleaner(306): Exit Thread[master/7adf9b3d9d04:0:becomeActiveMaster-HFileCleaner.large.0-1731671385549,5,FailOnTimeoutGroup] 2024-11-15T11:49:46,648 DEBUG [master/7adf9b3d9d04:0:becomeActiveMaster-HFileCleaner.small.0-1731671385549 {}] cleaner.HFileCleaner(306): Exit Thread[master/7adf9b3d9d04:0:becomeActiveMaster-HFileCleaner.small.0-1731671385549,5,FailOnTimeoutGroup] 2024-11-15T11:49:46,648 INFO [M:0;7adf9b3d9d04:45035 {}] hbase.ChoreService(370): Chore service for: master/7adf9b3d9d04:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-15T11:49:46,648 INFO [M:0;7adf9b3d9d04:45035 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T11:49:46,649 DEBUG [M:0;7adf9b3d9d04:45035 {}] master.HMaster(1795): Stopping service threads 2024-11-15T11:49:46,649 INFO [M:0;7adf9b3d9d04:45035 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-15T11:49:46,649 INFO [M:0;7adf9b3d9d04:45035 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T11:49:46,649 INFO [M:0;7adf9b3d9d04:45035 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-15T11:49:46,649 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-15T11:49:46,656 DEBUG [M:0;7adf9b3d9d04:45035 {}] zookeeper.ZKUtil(347): master:45035-0x1013f9df6b30000, quorum=127.0.0.1:60542, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-15T11:49:46,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45035-0x1013f9df6b30000, quorum=127.0.0.1:60542, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-15T11:49:46,656 WARN [M:0;7adf9b3d9d04:45035 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-15T11:49:46,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45035-0x1013f9df6b30000, quorum=127.0.0.1:60542, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:49:46,657 INFO [M:0;7adf9b3d9d04:45035 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/.lastflushedseqids 2024-11-15T11:49:46,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46247 is added to blk_1073741838_1014 (size=99) 2024-11-15T11:49:46,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36089 is added to blk_1073741838_1014 (size=99) 2024-11-15T11:49:46,664 INFO [M:0;7adf9b3d9d04:45035 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-15T11:49:46,664 INFO [M:0;7adf9b3d9d04:45035 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-15T11:49:46,664 DEBUG [M:0;7adf9b3d9d04:45035 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T11:49:46,664 INFO [M:0;7adf9b3d9d04:45035 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:49:46,665 DEBUG [M:0;7adf9b3d9d04:45035 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:49:46,665 DEBUG [M:0;7adf9b3d9d04:45035 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T11:49:46,665 DEBUG [M:0;7adf9b3d9d04:45035 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:49:46,665 INFO [M:0;7adf9b3d9d04:45035 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-15T11:49:46,683 DEBUG [M:0;7adf9b3d9d04:45035 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/33d239c6050747f2959caaf72a350cd1 is 82, key is hbase:meta,,1/info:regioninfo/1731671386249/Put/seqid=0 2024-11-15T11:49:46,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36089 is added to blk_1073741839_1015 (size=5672) 2024-11-15T11:49:46,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46247 is added to blk_1073741839_1015 (size=5672) 2024-11-15T11:49:46,688 INFO [M:0;7adf9b3d9d04:45035 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/33d239c6050747f2959caaf72a350cd1 2024-11-15T11:49:46,704 DEBUG [M:0;7adf9b3d9d04:45035 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/756519c3793747c6a768dd235baabdee is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731671386300/Put/seqid=0 2024-11-15T11:49:46,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46247 is added to blk_1073741840_1016 (size=5275) 2024-11-15T11:49:46,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36089 is added to blk_1073741840_1016 (size=5275) 2024-11-15T11:49:46,709 INFO [M:0;7adf9b3d9d04:45035 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/756519c3793747c6a768dd235baabdee 2024-11-15T11:49:46,725 DEBUG [M:0;7adf9b3d9d04:45035 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/76f077ff94014af1b44e8e793229f240 is 69, key is 7adf9b3d9d04,46175,1731671385223/rs:state/1731671385584/Put/seqid=0 2024-11-15T11:49:46,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36089 is added to blk_1073741841_1017 (size=5156) 2024-11-15T11:49:46,729 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46247 is added to blk_1073741841_1017 (size=5156) 2024-11-15T11:49:46,730 INFO [M:0;7adf9b3d9d04:45035 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/76f077ff94014af1b44e8e793229f240 2024-11-15T11:49:46,740 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46175-0x1013f9df6b30001, quorum=127.0.0.1:60542, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T11:49:46,740 INFO [RS:0;7adf9b3d9d04:46175 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T11:49:46,740 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46175-0x1013f9df6b30001, quorum=127.0.0.1:60542, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T11:49:46,740 INFO [RS:0;7adf9b3d9d04:46175 {}] regionserver.HRegionServer(1031): Exiting; stopping=7adf9b3d9d04,46175,1731671385223; zookeeper connection closed. 2024-11-15T11:49:46,740 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@444f0a52 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@444f0a52 2024-11-15T11:49:46,741 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-15T11:49:46,748 DEBUG [M:0;7adf9b3d9d04:45035 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2f1680e59a384e42973fde6b9d2ee4c9 is 52, key is load_balancer_on/state:d/1731671386358/Put/seqid=0 2024-11-15T11:49:46,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46247 is added to blk_1073741842_1018 (size=5056) 2024-11-15T11:49:46,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36089 is added to blk_1073741842_1018 (size=5056) 2024-11-15T11:49:46,752 INFO [M:0;7adf9b3d9d04:45035 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2f1680e59a384e42973fde6b9d2ee4c9 2024-11-15T11:49:46,756 DEBUG [M:0;7adf9b3d9d04:45035 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/33d239c6050747f2959caaf72a350cd1 as hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/33d239c6050747f2959caaf72a350cd1 2024-11-15T11:49:46,760 INFO [M:0;7adf9b3d9d04:45035 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/33d239c6050747f2959caaf72a350cd1, entries=8, sequenceid=29, filesize=5.5 K 2024-11-15T11:49:46,761 DEBUG [M:0;7adf9b3d9d04:45035 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/756519c3793747c6a768dd235baabdee as hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/756519c3793747c6a768dd235baabdee 2024-11-15T11:49:46,766 INFO [M:0;7adf9b3d9d04:45035 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/756519c3793747c6a768dd235baabdee, entries=3, sequenceid=29, filesize=5.2 K 2024-11-15T11:49:46,767 DEBUG [M:0;7adf9b3d9d04:45035 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/76f077ff94014af1b44e8e793229f240 as hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/76f077ff94014af1b44e8e793229f240 2024-11-15T11:49:46,771 INFO [M:0;7adf9b3d9d04:45035 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/76f077ff94014af1b44e8e793229f240, entries=1, sequenceid=29, filesize=5.0 K 2024-11-15T11:49:46,772 DEBUG [M:0;7adf9b3d9d04:45035 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2f1680e59a384e42973fde6b9d2ee4c9 as hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/2f1680e59a384e42973fde6b9d2ee4c9 2024-11-15T11:49:46,776 INFO [M:0;7adf9b3d9d04:45035 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46381/user/jenkins/test-data/ddf95e94-745a-ec88-a439-764a61583102/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/2f1680e59a384e42973fde6b9d2ee4c9, entries=1, sequenceid=29, filesize=4.9 K 2024-11-15T11:49:46,777 INFO [M:0;7adf9b3d9d04:45035 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 112ms, sequenceid=29, compaction requested=false 2024-11-15T11:49:46,787 INFO [M:0;7adf9b3d9d04:45035 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:49:46,787 DEBUG [M:0;7adf9b3d9d04:45035 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731671386664Disabling compacts and flushes for region at 1731671386664Disabling writes for close at 1731671386665 (+1 ms)Obtaining lock to block concurrent updates at 1731671386665Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731671386665Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731671386665Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731671386666 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731671386666Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731671386683 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731671386683Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731671386691 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731671386704 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731671386704Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731671386712 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731671386725 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731671386725Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731671386733 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731671386747 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731671386747Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@691fbcc0: reopening flushed file at 1731671386755 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2af08987: reopening flushed file at 1731671386760 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@68eea98: reopening flushed file at 1731671386766 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@40f0e95e: reopening flushed file at 1731671386771 (+5 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 112ms, sequenceid=29, compaction requested=false at 1731671386777 (+6 ms)Writing region close event to WAL at 1731671386787 (+10 ms)Closed at 1731671386787 2024-11-15T11:49:46,788 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:49:46,788 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:49:46,788 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:49:46,788 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:49:46,788 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T11:49:46,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46247 is added to blk_1073741830_1006 (size=10311) 2024-11-15T11:49:46,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36089 is added to blk_1073741830_1006 (size=10311) 2024-11-15T11:49:46,790 INFO [M:0;7adf9b3d9d04:45035 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-15T11:49:46,790 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-15T11:49:46,790 INFO [M:0;7adf9b3d9d04:45035 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45035 2024-11-15T11:49:46,791 INFO [M:0;7adf9b3d9d04:45035 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T11:49:46,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45035-0x1013f9df6b30000, quorum=127.0.0.1:60542, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T11:49:46,923 INFO [M:0;7adf9b3d9d04:45035 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T11:49:46,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45035-0x1013f9df6b30000, quorum=127.0.0.1:60542, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T11:49:46,925 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@627a202d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T11:49:46,925 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22ed154c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T11:49:46,925 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T11:49:46,925 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6bfe0bbd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T11:49:46,925 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@49e6dd92{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6a2df6f-4f5c-ff76-618a-5a44beaf2c45/hadoop.log.dir/,STOPPED} 2024-11-15T11:49:46,926 WARN [BP-493428621-172.17.0.2-1731671383497 heartbeating to localhost/127.0.0.1:46381 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T11:49:46,926 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-15T11:49:46,926 WARN [BP-493428621-172.17.0.2-1731671383497 heartbeating to localhost/127.0.0.1:46381 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-493428621-172.17.0.2-1731671383497 (Datanode Uuid 474af7d4-12d0-4d6b-b8e4-3c455c4cd94b) service to localhost/127.0.0.1:46381
2024-11-15T11:49:46,926 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-15T11:49:46,927 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6a2df6f-4f5c-ff76-618a-5a44beaf2c45/cluster_82e1db33-2e1d-f954-c3c3-8d38560d1295/data/data3/current/BP-493428621-172.17.0.2-1731671383497 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-15T11:49:46,927 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6a2df6f-4f5c-ff76-618a-5a44beaf2c45/cluster_82e1db33-2e1d-f954-c3c3-8d38560d1295/data/data4/current/BP-493428621-172.17.0.2-1731671383497 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-15T11:49:46,927 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-15T11:49:46,929 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@9fc2daa{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-15T11:49:46,929 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2a1af98b{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-15T11:49:46,929 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-15T11:49:46,929 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1040cecb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-15T11:49:46,929 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fcd61c6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6a2df6f-4f5c-ff76-618a-5a44beaf2c45/hadoop.log.dir/,STOPPED}
2024-11-15T11:49:46,930 WARN [BP-493428621-172.17.0.2-1731671383497 heartbeating to localhost/127.0.0.1:46381 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-15T11:49:46,930 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-15T11:49:46,930 WARN [BP-493428621-172.17.0.2-1731671383497 heartbeating to localhost/127.0.0.1:46381 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-493428621-172.17.0.2-1731671383497 (Datanode Uuid 4669be31-4f25-40c6-a36f-8d4494be7cd8) service to localhost/127.0.0.1:46381
2024-11-15T11:49:46,930 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-15T11:49:46,931 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6a2df6f-4f5c-ff76-618a-5a44beaf2c45/cluster_82e1db33-2e1d-f954-c3c3-8d38560d1295/data/data1/current/BP-493428621-172.17.0.2-1731671383497 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-15T11:49:46,931 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6a2df6f-4f5c-ff76-618a-5a44beaf2c45/cluster_82e1db33-2e1d-f954-c3c3-8d38560d1295/data/data2/current/BP-493428621-172.17.0.2-1731671383497 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-15T11:49:46,931 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-15T11:49:46,937 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@61edd007{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-15T11:49:46,937 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7d664f93{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-15T11:49:46,937 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-15T11:49:46,937 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4c66b7d1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-15T11:49:46,937 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@b72d363{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6a2df6f-4f5c-ff76-618a-5a44beaf2c45/hadoop.log.dir/,STOPPED}
2024-11-15T11:49:46,944 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-15T11:49:46,961 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-11-15T11:49:46,970 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=268 (was 232)
Potentially hanging thread: nioEventLoopGroup-44-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:46381 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-43-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46381
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:46381 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: HMaster-EventLoopGroup-16-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46381
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins@localhost:46381
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:46381 from jenkins.hfs.7
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-44-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:46381
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46381
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
 - Thread LEAK? -, OpenFileDescriptor=541 (was 518) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=202 (was 202), ProcessCount=11 (was 11), AvailableMemoryMB=11592 (was 10147) - AvailableMemoryMB LEAK? -